1 /* @(#)rm_class.c 1.48 97/12/05 SMI */
2 /* $KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $ */
5 * Copyright (c) 1991-1997 Regents of the University of California.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the Network Research
19 * Group at Lawrence Berkeley Laboratory.
20 * 4. Neither the name of the University nor of the Laboratory may be used
21 * to endorse or promote products derived from this software without
22 * specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * LBL code modified by speer@eng.sun.com, May 1997.
37 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
42 #include "opt_inet6.h"
44 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
46 #include <sys/param.h>
47 #include <sys/malloc.h>
49 #include <sys/socket.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/errno.h>
54 #include <sys/thread.h>
55 #include <sys/thread2.h>
58 #include <net/netmsg2.h>
59 #include <net/netisr2.h>
61 #include <net/altq/altq.h>
62 #include <net/altq/altq_rmclass.h>
63 #include <net/altq/altq_rmclass_debug.h>
64 #include <net/altq/altq_red.h>
65 #include <net/altq/altq_rio.h>
68 static struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
69 static struct cbqtrace *cbqtrace_ptr = NULL;
70 static int cbqtrace_count;
77 #define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
83 static int rmc_satisfied(struct rm_class *, struct timeval *);
84 static void rmc_wrr_set_weights(struct rm_ifdat *);
85 static void rmc_depth_compute(struct rm_class *);
86 static void rmc_depth_recompute(rm_class_t *);
88 static struct mbuf *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
89 static struct mbuf *_rmc_prr_dequeue_next(struct rm_ifdat *, int);
91 static int _rmc_addq(rm_class_t *, struct mbuf *);
92 static void _rmc_dropq(rm_class_t *);
93 static struct mbuf *_rmc_getq(rm_class_t *);
94 static struct mbuf *_rmc_pollq(rm_class_t *);
96 static int rmc_under_limit(struct rm_class *, struct timeval *);
97 static void rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
98 static void rmc_drop_action(struct rm_class *);
99 static void rmc_restart(void *);
100 static void rmc_restart_dispatch(netmsg_t);
101 static void rmc_root_overlimit(struct rm_class *, struct rm_class *);
103 #define BORROW_OFFTIME
105 * BORROW_OFFTIME (experimental):
106 * borrow the offtime of the class borrowing from.
107 * the reason is that when its own offtime is set, the class is unable
108 * to borrow much, especially when cutoff is taking effect.
109 * but when the borrowed class is overloaded (advidle is close to minidle),
110 * use the borrowing class's offtime to avoid overload.
112 #define ADJUST_CUTOFF
114 * ADJUST_CUTOFF (experimental):
115 * if no underlimit class is found due to cutoff, increase cutoff and
116 * retry the scheduling loop.
117 * also, don't invoke delay_actions while cutoff is taking effect,
118 * since a sleeping class won't have a chance to be scheduled in the
121 * now the heuristics for setting the top-level variable (cutoff_) become:
122 * 1. if a packet arrives for a not-overlimit class, set cutoff
123 * to the depth of the class.
124 * 2. if cutoff is i, and a packet arrives for an overlimit class
125 * with an underlimit ancestor at a lower level than i (say j),
126 * then set cutoff to j.
127 * 3. at scheduling a packet, if there is no underlimit class
128 * due to the current cutoff level, increase cutoff by 1 and
129 * then try to schedule again.
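 *
 * An illustrative scenario: suppose the tree is a root (depth 2) over an
 * agency class (depth 1) over a leaf class X (depth 0), with cutoff_ reset
 * to RM_MAXDEPTH.  A packet arriving for X while X is underlimit pulls
 * cutoff_ down to 0 (rule 1).  If X is overlimit but its agency is
 * underlimit, cutoff_ becomes 1 (rule 2).  If the scheduler then finds no
 * underlimit class within the current cutoff, it raises cutoff_ one level
 * at a time (rule 3) until borrowing from the root is allowed again.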
134 * rmc_newclass(...) - Create a new resource management class at priority
135 * 'pri' on the interface given by 'ifd'.
137 * nsecPerByte is the data rate of the interface in nanoseconds/byte.
138 * E.g., 800 for a 10Mb/s ethernet. If the class gets less
139 * than 100% of the bandwidth, this number should be the
140 * 'effective' rate for the class. Let f be the
141 * bandwidth fraction allocated to this class, and let
142 * nsPerByte be the data rate of the output link in
143 * nanoseconds/byte. Then nsecPerByte is set to
144 * nsPerByte / f. E.g., 1600 (= 800 / .5)
145 * for a class that gets 50% of an ethernet's bandwidth.
147 * action the routine to call when the class is over limit.
149 * maxq max allowable queue size for class (in packets).
151 * parent parent class pointer.
153 * borrow class to borrow from (should be either 'parent' or null).
155 * maxidle max value allowed for class 'idle' time estimate (this
156 * parameter determines how large an initial burst of packets
157 * can be before overlimit action is invoked.)
159 * offtime how long 'delay' action will delay when class goes over
160 * limit (this parameter determines the steady-state burst
161 * size when a class is running over its limit).
163 * Maxidle and offtime have to be computed from the following: If the
164 * average packet size is s, the bandwidth fraction allocated to this
165 * class is f, we want to allow b packet bursts, and the gain of the
166 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
168 * ptime = s * nsPerByte * (1 - f) / f
169 * maxidle = ptime * (1 - g^b) / g^b
170 * minidle = -ptime * (1 / (f - 1))
171 * offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
173 * Operationally, it's convenient to specify maxidle & offtime in units
174 * independent of the link bandwidth so the maxidle & offtime passed to
175 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
176 * (The constant factor is a scale factor needed to make the parameters
177 * integers. This scaling also means that the 'unscaled' values of
178 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
179 * not nanoseconds.) Also note that the 'idle' filter computation keeps
180 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
181 * maxidle also must be scaled upward by this value. Thus, the passed
182 * values for maxidle and offtime can be computed as follows:
184 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
185 * offtime = offtime * 8 / (1000 * nsecPerByte)
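 *
 * A worked example (illustrative; it assumes RM_FILTER_GAIN is 5, so
 * g = 1 - 2^-5 = 0.96875): for a class granted half of a 10Mb/s ethernet
 * (nsPerByte = 800, f = 0.5, hence nsecPerByte = 1600), with an average
 * packet size s = 1000 bytes and a burst allowance of b = 16 packets:
 *
 *	ptime   = 1000 * 800 * (1 - 0.5) / 0.5      = 800000 ns
 *	g^b     = 0.96875^16                        ~ 0.602
 *	maxidle = 800000 * (1 - 0.602) / 0.602      ~ 530000 ns
 *
 * so the scaled value actually passed in is roughly
 *
 *	maxidle = 530000 * 2^5 * 8 / (1000 * 1600)  ~ 85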
187 * When USE_HRTIME is employed, then maxidle and offtime become:
188 * maxidle = maxidle * (8.0 / nsecPerByte);
189 * offtime = offtime * (8.0 / nsecPerByte);
192 rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
193 void (*action)(rm_class_t *, rm_class_t *), int maxq,
194 struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
195 int minidle, u_int offtime, int pktsize, int flags)
198 struct rm_class *peer;
200 if (pri >= RM_MAXPRIO)
203 if (flags & RMCF_RED) {
205 kprintf("rmc_newclass: RED not configured for CBQ!\n");
211 if (flags & RMCF_RIO) {
213 kprintf("rmc_newclass: RIO not configured for CBQ!\n");
219 cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
220 callout_init(&cl->callout_);
221 netmsg_init(&cl->callout_nmsg_, NULL, &netisr_adone_rport,
222 MSGF_PRIORITY, rmc_restart_dispatch);
223 cl->callout_nmsg_.lmsg.u.ms_resultp = cl;
225 cl->q_ = kmalloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);
228 * Class initialization.
230 cl->children_ = NULL;
231 cl->parent_ = parent;
232 cl->borrow_ = borrow;
236 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
239 cl->ns_per_byte_ = nsecPerByte;
241 qlimit(cl->q_) = maxq;
242 qtype(cl->q_) = Q_DROPHEAD;
246 #if 1 /* minidle is also scaled in ALTQ */
247 cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
248 if (cl->minidle_ > 0)
251 cl->minidle_ = minidle;
253 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
254 if (cl->maxidle_ == 0)
256 #if 1 /* offtime is also scaled in ALTQ */
257 cl->avgidle_ = cl->maxidle_;
258 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
259 if (cl->offtime_ == 0)
263 cl->offtime_ = (offtime * nsecPerByte) / 8;
265 cl->overlimit = action;
268 if (flags & (RMCF_RED|RMCF_RIO)) {
269 int red_flags, red_pkttime;
272 if (flags & RMCF_ECN)
273 red_flags |= REDF_ECN;
275 if (flags & RMCF_CLEARDSCP)
276 red_flags |= RIOF_CLEARDSCP;
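		/*
		 * nsecPerByte * pktsize is the transmission time of an
		 * average packet in nanoseconds; dividing by 1000 hands the
		 * RED/RIO code a per-packet time in microseconds.
		 */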
278 red_pkttime = nsecPerByte * pktsize / 1000;
280 if (flags & RMCF_RED) {
281 cl->red_ = red_alloc(0, 0,
282 qlimit(cl->q_) * 10/100,
283 qlimit(cl->q_) * 30/100,
284 red_flags, red_pkttime);
285 if (cl->red_ != NULL)
286 qtype(cl->q_) = Q_RED;
290 cl->red_ = (red_t *)rio_alloc(0, NULL,
291 red_flags, red_pkttime);
292 if (cl->red_ != NULL)
293 qtype(cl->q_) = Q_RIO;
297 #endif /* ALTQ_RED */
300 * put the class into the class tree
303 if ((peer = ifd->active_[pri]) != NULL) {
304 /* find the last class at this pri */
306 while (peer->peer_ != ifd->active_[pri])
310 ifd->active_[pri] = cl;
315 cl->next_ = parent->children_;
316 parent->children_ = cl;
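	/*
	 * Note that classes at the same priority are linked on a circular
	 * ring through peer_ (see the walk in rmc_delete_class()), while the
	 * children of a parent form a NULL-terminated list through next_.
	 */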
321 * Compute the depth of this class and its ancestors in the class
324 rmc_depth_compute(cl);
327 * If CBQ's WRR is enabled, then initialize the class WRR state.
331 ifd->alloc_[pri] += cl->allotment_;
332 rmc_wrr_set_weights(ifd);
339 rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
340 int minidle, u_int offtime, int pktsize)
342 struct rm_ifdat *ifd;
346 old_allotment = cl->allotment_;
349 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
351 cl->ns_per_byte_ = nsecPerByte;
353 qlimit(cl->q_) = maxq;
355 #if 1 /* minidle is also scaled in ALTQ */
356 cl->minidle_ = (minidle * nsecPerByte) / 8;
357 if (cl->minidle_ > 0)
360 cl->minidle_ = minidle;
362 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
363 if (cl->maxidle_ == 0)
365 #if 1 /* offtime is also scaled in ALTQ */
366 cl->avgidle_ = cl->maxidle_;
367 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
368 if (cl->offtime_ == 0)
372 cl->offtime_ = (offtime * nsecPerByte) / 8;
376 * If CBQ's WRR is enabled, then initialize the class WRR state.
379 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
380 rmc_wrr_set_weights(ifd);
388 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
389 * the appropriate round robin weights for the CBQ weighted round robin
396 rmc_wrr_set_weights(struct rm_ifdat *ifd)
399 struct rm_class *cl, *clh;
401 for (i = 0; i < RM_MAXPRIO; i++) {
403 * This is inverted from that of the simulator to
404 * maintain precision.
406 if (ifd->num_[i] == 0)
409 ifd->M_[i] = ifd->alloc_[i] /
410 (ifd->num_[i] * ifd->maxpkt_);
412 * Compute the weighted allotment for each class.
413 * This takes the expensive div instruction out
414 * of the main loop for the wrr scheduling path.
415 * These only get recomputed when a class comes or
418 if (ifd->active_[i] != NULL) {
419 clh = cl = ifd->active_[i];
421 /* safe-guard for slow link or alloc_ == 0 */
423 cl->w_allotment_ = 0;
425 cl->w_allotment_ = cl->allotment_ /
428 } while ((cl != NULL) && (cl != clh));
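			/*
			 * For example (a maxpkt_ of 2048 bytes is assumed here
			 * purely for illustration): two classes at this
			 * priority with allotments of 600000 and 200000
			 * bytes/sec give alloc_[i] = 800000 and
			 * M_[i] = 800000 / (2 * 2048) ~ 195, so the per-round
			 * quanta become roughly 3076 and 1025 bytes -- the 3:1
			 * bandwidth ratio is preserved while the average
			 * quantum stays near one maximum-sized packet per
			 * class.
			 */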
434 rmc_get_weight(struct rm_ifdat *ifd, int pri)
436 if ((pri >= 0) && (pri < RM_MAXPRIO))
437 return (ifd->M_[pri]);
444 * rmc_depth_compute(struct rm_class *cl) - This function computes the
445 * appropriate depth of class 'cl' and its ancestors.
451 rmc_depth_compute(struct rm_class *cl)
453 rm_class_t *t = cl, *p;
456 * Recompute the depth for the branch of the tree.
460 if (p && (t->depth_ >= p->depth_)) {
461 p->depth_ = t->depth_ + 1;
470 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
471 * the depth of the tree after a class has been deleted.
477 rmc_depth_recompute(rm_class_t *cl)
484 if ((t = p->children_) == NULL) {
490 if (t->depth_ > cdepth)
495 if (p->depth_ == cdepth + 1)
496 /* no change to this parent */
499 p->depth_ = cdepth + 1;
507 if (cl->depth_ >= 1) {
508 if (cl->children_ == NULL) {
510 } else if ((t = cl->children_) != NULL) {
512 if (t->children_ != NULL)
513 rmc_depth_recompute(t);
517 rmc_depth_compute(cl);
524 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
525 * function deletes a class from the link-sharing structure and frees
526 * all resources associated with the class.
532 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
534 struct rm_class *p, *head, *previous;
535 struct netmsg_base smsg;
536 struct ifaltq_subque *ifsq =
537 &ifd->ifq_->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
539 KKASSERT(cl->children_ == NULL);
541 ALTQ_SQ_ASSERT_LOCKED(ifsq);
542 ALTQ_SQ_UNLOCK(ifsq);
543 callout_stop_sync(&cl->callout_);
544 /* Make sure that cl->callout_nmsg_ stops. */
545 netmsg_init(&smsg, NULL, &curthread->td_msgport, 0,
546 netmsg_sync_handler);
547 lwkt_domsg(netisr_cpuport(0), &smsg.lmsg, 0);
552 if (ifd->pollcache_ == cl)
553 ifd->pollcache_ = NULL;
556 * Free packets in the packet queue.
557 * XXX - this may not be a desired behavior. Packets should be
563 * If the class has a parent, then remove the class from the
564 * parent's children chain.
566 if (cl->parent_ != NULL) {
567 head = cl->parent_->children_;
569 if (head->next_ == NULL) {
570 KKASSERT(head == cl);
571 cl->parent_->children_ = NULL;
572 cl->parent_->leaf_ = 1;
573 } else while (p != NULL) {
576 cl->parent_->children_ = cl->next_;
578 previous->next_ = cl->next_;
589 * Delete class from class priority peer list.
591 if ((p = ifd->active_[cl->pri_]) != NULL) {
593 * If there is more than one member of this priority
594 * level, then look for class(cl) in the priority level.
597 while (p->peer_ != cl)
599 p->peer_ = cl->peer_;
601 if (ifd->active_[cl->pri_] == cl)
602 ifd->active_[cl->pri_] = cl->peer_;
605 ifd->active_[cl->pri_] = NULL;
610 * Recompute the WRR weights.
613 ifd->alloc_[cl->pri_] -= cl->allotment_;
614 ifd->num_[cl->pri_]--;
615 rmc_wrr_set_weights(ifd);
619 * Re-compute the depth of the tree.
622 rmc_depth_recompute(cl->parent_);
624 rmc_depth_recompute(ifd->root_);
630 * Free the class structure.
632 if (cl->red_ != NULL) {
634 if (q_is_rio(cl->q_))
635 rio_destroy((rio_t *)cl->red_);
638 if (q_is_red(cl->q_))
639 red_destroy(cl->red_);
642 kfree(cl->q_, M_ALTQ);
648 * rmc_init(...) - Initialize the resource management data structures
649 * associated with the output portion of interface 'ifp'. 'ifd' is
650 * where the structures will be built (for backwards compatibility, the
651 * structures aren't kept in the ifnet struct). 'nsecPerByte'
652 * gives the link speed (inverse of bandwidth) in nanoseconds/byte.
653 * 'restart' is the driver-specific routine that the generic 'delay
654 * until under limit' action will call to restart output. `maxq'
655 * is the queue size of the 'link' & 'default' classes. 'maxqueued'
656 * is the maximum number of packets that the resource management
657 * code will allow to be queued 'downstream' (this is typically 1).
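 *
 * A sketch of a typical call sequence (illustrative only; the variable
 * names are not taken from any particular discipline):
 *
 *	rmc_init(ifq, ifd, 800, my_restart, 64, 1,
 *	    maxidle, minidle, offtime, RMCF_WRR);
 *	cl = rmc_newclass(1, ifd, 1600, rmc_delay_action, 64, ifd->root_,
 *	    ifd->root_, maxidle, minidle, offtime, 1000, 0);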
663 rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
664 void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
665 int minidle, u_int offtime, int flags)
670 * Initialize the CBQ tracing/debug facility.
674 bzero(ifd, sizeof (*ifd));
675 mtu = ifq->altq_ifp->if_mtu;
677 ifd->restart = restart;
678 ifd->maxqueued_ = maxqueued;
679 ifd->ns_per_byte_ = nsecPerByte;
681 ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
682 ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
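	/*
	 * maxiftime_ bounds how far the estimated completion time (ifnow_)
	 * may run ahead of the wall clock in rmc_update_class_util():
	 * mtu * nsecPerByte / 1000 is one MTU transmission time in
	 * microseconds, and the default bound is 16 of those, quartered when
	 * a single MTU would take more than 10ms to transmit.
	 */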
684 ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
685 if (mtu * nsecPerByte > 10 * 1000000)
686 ifd->maxiftime_ /= 4;
690 CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
693 * Initialize the CBQ's WRR state.
695 for (i = 0; i < RM_MAXPRIO; i++) {
700 ifd->active_[i] = NULL;
704 * Initialize current packet state.
708 for (i = 0; i < RM_MAXQUEUED; i++) {
709 ifd->class_[i] = NULL;
711 ifd->borrowed_[i] = NULL;
715 * Create the root class of the link-sharing structure.
717 ifd->root_ = rmc_newclass(0, ifd, nsecPerByte, rmc_root_overlimit,
718 maxq, 0, 0, maxidle, minidle, offtime, 0, 0);
719 if (ifd->root_ == NULL) {
720 kprintf("rmc_init: root class not allocated\n");
723 ifd->root_->depth_ = 0;
728 * rmc_queue_packet(struct rm_class *cl, struct mbuf *m) - Add packet given by
729 * mbuf 'm' to queue for resource class 'cl'. This routine is called
730 * by a driver's if_output routine. This routine must be called with
731 * output packet completion interrupts locked out (to avoid racing with
734 * Returns: 0 on successful queueing
735 * -1 when packet drop occurs
738 rmc_queue_packet(struct rm_class *cl, struct mbuf *m)
741 struct rm_ifdat *ifd = cl->ifdat_;
743 int is_empty = qempty(cl->q_);
746 if (ifd->cutoff_ > 0) {
747 if (TV_LT(&cl->undertime_, &now)) {
748 if (ifd->cutoff_ > cl->depth_)
749 ifd->cutoff_ = cl->depth_;
750 CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
755 * the class is overlimit. if the class has
756 * underlimit ancestors, set cutoff to the lowest
759 struct rm_class *borrow = cl->borrow_;
761 while (borrow != NULL &&
762 borrow->depth_ < ifd->cutoff_) {
763 if (TV_LT(&borrow->undertime_, &now)) {
764 ifd->cutoff_ = borrow->depth_;
765 CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
768 borrow = borrow->borrow_;
772 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
773 if (TV_LT(&cl->borrow_->undertime_, &now)) {
774 ifd->cutoff_ = cl->borrow_->depth_;
775 CBQTRACE(rmc_queue_packet, 'ffob',
776 cl->borrow_->depth_);
782 if (_rmc_addq(cl, m) < 0)
787 CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
791 if (qlen(cl->q_) > qlimit(cl->q_)) {
792 /* note: qlimit can be set to 0 or 1 */
801 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
802 * classes to see if they are satisfied.
806 rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
811 for (i = RM_MAXPRIO - 1; i >= 0; i--) {
812 if ((bp = ifd->active_[i]) != NULL) {
815 if (!rmc_satisfied(p, now)) {
816 ifd->cutoff_ = p->depth_;
828 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
832 rmc_satisfied(struct rm_class *cl, struct timeval *now)
838 if (TV_LT(now, &cl->undertime_))
840 if (cl->depth_ == 0) {
841 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
846 if (cl->children_ != NULL) {
849 if (!rmc_satisfied(p, now))
859 * Return 1 if class 'cl' is under limit or can borrow from a parent,
860 * 0 if overlimit. As a side-effect, this routine will invoke the
861 * class overlimit action if the class is overlimit.
865 rmc_under_limit(struct rm_class *cl, struct timeval *now)
869 struct rm_ifdat *ifd = cl->ifdat_;
871 ifd->borrowed_[ifd->qi_] = NULL;
873 * If cl is the root class, then always return that it is
874 * underlimit. Otherwise, check to see if the class is underlimit.
876 if (cl->parent_ == NULL)
880 if (TV_LT(now, &cl->undertime_))
883 callout_stop(&cl->callout_);
885 cl->undertime_.tv_sec = 0;
890 while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
891 if (((cl = cl->borrow_) == NULL) ||
892 (cl->depth_ > ifd->cutoff_)) {
895 /* cutoff is taking effect, just
896 return false without calling
900 #ifdef BORROW_OFFTIME
902 * check if the class can borrow offtime too.
903 * borrow offtime from the top of the borrow
904 * chain if the top class is not overloaded.
907 /* cutoff is taking effect, use this class as top. */
909 CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
911 if (top != NULL && top->avgidle_ == top->minidle_)
914 (p->overlimit)(p, top);
917 (p->overlimit)(p, NULL);
925 ifd->borrowed_[ifd->qi_] = cl;
930 * _rmc_wrr_dequeue_next() - This is scheduler for WRR as opposed to
931 * Packet-by-packet round robin.
933 * The heart of the weighted round-robin scheduler, which decides which
934 * class next gets to send a packet. Highest priority first, then
935 * weighted round-robin within priorities.
937 * Each able-to-send class gets to send until its byte allocation is
938 * exhausted. Thus, the active pointer is only changed after a class has
939 * exhausted its allocation.
941 * If the scheduler finds no class that is underlimit or able to borrow,
942 * then the first class found that had a nonzero queue and is allowed to
943 * borrow gets to send.
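 *
 * For example (illustrative): two classes at the same priority with
 * w_allotment_ of 3000 and 1000 bytes, both backlogged with 1500-byte
 * packets.  The first class sends two packets before its quantum is
 * exhausted and the active pointer advances; the second sends one packet
 * and goes negative; each later round tops the counters back up by the
 * respective w_allotment_, so over many rounds the byte counts track the
 * 3:1 ratio of the allotments.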
947 _rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
949 struct rm_class *cl = NULL, *first = NULL;
958 * if the driver polls the top of the queue and then removes
959 * the polled packet, we must return the same packet.
961 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
962 cl = ifd->pollcache_;
964 if (ifd->efficient_) {
965 /* check if this class is overlimit */
966 if (cl->undertime_.tv_sec != 0 &&
967 rmc_under_limit(cl, &now) == 0)
970 ifd->pollcache_ = NULL;
973 /* mode == ALTDQ_POLL || pollcache == NULL */
974 ifd->pollcache_ = NULL;
975 ifd->borrowed_[ifd->qi_] = NULL;
979 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
980 if (ifd->na_[cpri] == 0)
984 * Loop through twice for a priority level, if some class
985 * was unable to send a packet the first round because
986 * of the weighted round-robin mechanism.
987 * During the second loop at this level, deficit==2.
988 * (This second loop is not needed if for every class,
989 * "M[cl->pri_])" times "cl->allotment" is greater than
990 * the byte size for the largest packet in the class.)
993 cl = ifd->active_[cpri];
994 KKASSERT(cl != NULL);
996 if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
997 cl->bytes_alloc_ += cl->w_allotment_;
998 if (!qempty(cl->q_)) {
999 if ((cl->undertime_.tv_sec == 0) ||
1000 rmc_under_limit(cl, &now)) {
1001 if (cl->bytes_alloc_ > 0 || deficit > 1)
1004 /* underlimit but no alloc */
1007 ifd->borrowed_[ifd->qi_] = NULL;
1010 else if (first == NULL && cl->borrow_ != NULL)
1011 first = cl; /* borrowing candidate */
1014 cl->bytes_alloc_ = 0;
1016 } while (cl != ifd->active_[cpri]);
1019 /* first loop found an underlimit class with deficit */
1020 /* Loop on same priority level, with new deficit. */
1026 #ifdef ADJUST_CUTOFF
1028 * no underlimit class found. if cutoff is taking effect,
1029 * increase cutoff and try again.
1031 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1033 CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
1036 #endif /* ADJUST_CUTOFF */
1038 * If LINK_EFFICIENCY is turned on, then the first overlimit
1039 * class we encounter will send a packet if all the classes
1040 * of the link-sharing structure are overlimit.
1043 CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
1045 if (!ifd->efficient_ || first == NULL)
1050 #if 0 /* too time-consuming for nothing */
1052 callout_stop(&cl->callout_);
1054 cl->undertime_.tv_sec = 0;
1056 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1057 ifd->cutoff_ = cl->borrow_->depth_;
1060 * Dequeue the packet and do the bookkeeping...
1063 if (op == ALTDQ_REMOVE) {
1066 panic("_rmc_wrr_dequeue_next");
1071 * Update class statistics and link data.
1073 if (cl->bytes_alloc_ > 0)
1074 cl->bytes_alloc_ -= m_pktlen(m);
1076 if ((cl->bytes_alloc_ <= 0) || first == cl)
1077 ifd->active_[cl->pri_] = cl->peer_;
1079 ifd->active_[cl->pri_] = cl;
1081 ifd->class_[ifd->qi_] = cl;
1082 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1083 ifd->now_[ifd->qi_] = now;
1084 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1087 /* mode == ALTDQ_POLL */
1091 * Don't use poll cache; the poll/dequeue
1092 * model is no longer applicable to SMP
1100 * The dequeue at (+) will hit the poll
1101 * cache set by CPU-B.
1103 ifd->pollcache_ = cl;
1110 * Dequeue & return next packet from the highest priority class that
1111 * has a packet to send & has enough allocation to send it. This
1112 * routine is called by a driver whenever it needs a new packet to
1115 static struct mbuf *
1116 _rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
1120 struct rm_class *cl, *first = NULL;
1126 * if the driver polls the top of the queue and then removes
1127 * the polled packet, we must return the same packet.
1129 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1130 cl = ifd->pollcache_;
1132 ifd->pollcache_ = NULL;
1135 /* mode == ALTDQ_POLL || pollcache == NULL */
1136 ifd->pollcache_ = NULL;
1137 ifd->borrowed_[ifd->qi_] = NULL;
1138 #ifdef ADJUST_CUTOFF
1141 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1142 if (ifd->na_[cpri] == 0)
1144 cl = ifd->active_[cpri];
1145 KKASSERT(cl != NULL);
1147 if (!qempty(cl->q_)) {
1148 if ((cl->undertime_.tv_sec == 0) ||
1149 rmc_under_limit(cl, &now))
1151 if (first == NULL && cl->borrow_ != NULL)
1155 } while (cl != ifd->active_[cpri]);
1158 #ifdef ADJUST_CUTOFF
1160 * no underlimit class found. if cutoff is taking effect, increase
1161 * cutoff and try again.
1163 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1167 #endif /* ADJUST_CUTOFF */
1169 * If LINK_EFFICIENCY is turned on, then the first overlimit
1170 * class we encounter will send a packet if all the classes
1171 * of the link-sharing structure are overlimit.
1174 if (!ifd->efficient_ || first == NULL)
1179 #if 0 /* too time-consuming for nothing */
1181 callout_stop(&cl->callout_);
1183 cl->undertime_.tv_sec = 0;
1185 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1186 ifd->cutoff_ = cl->borrow_->depth_;
1189 * Dequeue the packet and do the bookkeeping...
1192 if (op == ALTDQ_REMOVE) {
1195 panic("_rmc_prr_dequeue_next");
1199 ifd->active_[cpri] = cl->peer_;
1201 ifd->class_[ifd->qi_] = cl;
1202 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1203 ifd->now_[ifd->qi_] = now;
1204 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1207 /* mode == ALTDQ_POLL */
1211 * Don't use poll cache; the poll/dequeue
1212 * model is no longer applicable to SMP
1220 * The dequeue at (+) will hit the poll
1221 * cache set by CPU-B.
1223 ifd->pollcache_ = cl;
1231 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
1232 * is invoked by the packet driver to get the next packet to be
1233 * dequeued and output on the link. If WRR is enabled, then the
1234 * WRR dequeue next routine will determine the next packet to be sent.
1235 * Otherwise, packet-by-packet round robin is invoked.
1237 * Returns: NULL, if a packet is not available or if all
1238 * classes are overlimit.
1240 * Otherwise, Pointer to the next packet.
1244 rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
1246 if (ifd->queued_ >= ifd->maxqueued_)
1249 return (_rmc_wrr_dequeue_next(ifd, mode));
1251 return (_rmc_prr_dequeue_next(ifd, mode));
1255 * Update the utilization estimate for the packet that just completed.
1256 * The packet's class & the parent(s) of that class all get their
1257 * estimators updated. This routine is called by the driver's output-
1258 * packet-completion interrupt service routine.
1262 * a macro to approximate "divide by 1000" that gives 0.000999,
1263 * if a value has enough effective digits.
1264 * (on pentium, mul takes 9 cycles but div takes 46!)
1266 #define NSEC_TO_USEC(t) (((t) >> 10) + ((t) >> 16) + ((t) >> 17))
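/*
 * The three shifts approximate multiplication by
 *	2^-10 + 2^-16 + 2^-17 = 0.000977 + 0.0000153 + 0.0000076 ~ 0.0009994
 * which is within about 0.06% of a true divide by 1000 once the value is
 * large enough that the low-order bits shifted away are insignificant.
 */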
1268 rmc_update_class_util(struct rm_ifdat *ifd)
1270 int idle, avgidle, pktlen;
1271 int pkt_time, tidle;
1272 rm_class_t *cl, *borrowed;
1273 rm_class_t *borrows;
1274 struct timeval *nowp;
1277 * Get the most recent completed class.
1279 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1282 pktlen = ifd->curlen_[ifd->qo_];
1283 borrowed = ifd->borrowed_[ifd->qo_];
1286 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1289 * Run estimator on class and its ancestors.
1292 * rm_update_class_util is designed to be called when the
1293 * transfer is completed from an xmit complete interrupt,
1294 * but most drivers don't implement an upcall for that.
1295 * so, just use estimated completion time.
1296 * as a result, ifd->qi_ and ifd->qo_ are always synced.
1298 nowp = &ifd->now_[ifd->qo_];
1299 /* get pkt_time (for link) in usec */
1300 #if 1 /* use approximation */
1301 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
1302 pkt_time = NSEC_TO_USEC(pkt_time);
1304 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1306 #if 1 /* ALTQ4PPP */
1307 if (TV_LT(nowp, &ifd->ifnow_)) {
1311 * make sure the estimated completion time does not go
1312 * too far. it can happen when the link layer supports
1313 * data compression or the interface speed is set to
1314 * a much lower value.
1316 TV_DELTA(&ifd->ifnow_, nowp, iftime);
1317 if (iftime+pkt_time < ifd->maxiftime_) {
1318 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1320 TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1323 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1326 if (TV_LT(nowp, &ifd->ifnow_)) {
1327 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1329 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1333 while (cl != NULL) {
1334 TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1335 if (idle >= 2000000)
1337 * this class is idle enough, reset avgidle.
1338 * (TV_DELTA returns 2000000 us when delta is large.)
1340 cl->avgidle_ = cl->maxidle_;
1342 /* get pkt_time (for class) in usec */
1343 #if 1 /* use approximation */
1344 pkt_time = pktlen * cl->ns_per_byte_;
1345 pkt_time = NSEC_TO_USEC(pkt_time);
1347 pkt_time = pktlen * cl->ns_per_byte_ / 1000;
1351 avgidle = cl->avgidle_;
1352 avgidle += idle - (avgidle >> RM_FILTER_GAIN);
1353 cl->avgidle_ = avgidle;
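		/*
		 * In unscaled terms this is the usual exponentially weighted
		 * moving average: with the estimate kept scaled up by
		 * 2^RM_FILTER_GAIN (see the rmc_newclass comment), letting
		 * g = RM_FILTER_GAIN,
		 *	a_new = a + idle - a/2^g
		 * is equivalent to
		 *	(a_new/2^g) = (1 - 2^-g) * (a/2^g) + 2^-g * idle
		 * i.e. each idle sample enters the average with weight 2^-g.
		 */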
1355 /* Are we overlimit ? */
1357 CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1360 * need some lower bound for avgidle, otherwise
1361 * a borrowing class gets unbounded penalty.
1363 if (avgidle < cl->minidle_)
1364 avgidle = cl->avgidle_ = cl->minidle_;
1366 /* set next idle to make avgidle 0 */
1368 (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
1369 TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1373 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1374 cl->undertime_.tv_sec = 0;
1375 if (cl->sleeping_) {
1376 callout_stop(&cl->callout_);
1381 if (borrows != NULL) {
1383 ++cl->stats_.borrows;
1387 cl->last_ = ifd->ifnow_;
1388 cl->last_pkttime_ = pkt_time;
1391 if (cl->parent_ == NULL) {
1392 /* take stats of root class */
1393 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1401 * Check to see if cutoff needs to be set to a new level.
1403 cl = ifd->class_[ifd->qo_];
1404 if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1406 if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1407 rmc_tl_satisfied(ifd, nowp);
1408 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1410 ifd->cutoff_ = borrowed->depth_;
1411 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1414 if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
1417 rmc_tl_satisfied(ifd, &now);
1419 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1421 ifd->cutoff_ = borrowed->depth_;
1422 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1428 * Release class slot
1430 ifd->borrowed_[ifd->qo_] = NULL;
1431 ifd->class_[ifd->qo_] = NULL;
1432 ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1438 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1439 * over-limit action routines. These get invoked by rmc_under_limit()
1440 * if a class with packets to send is over its bandwidth limit & can't
1441 * borrow from a parent class.
1447 rmc_drop_action(struct rm_class *cl)
1449 struct rm_ifdat *ifd = cl->ifdat_;
1451 KKASSERT(qlen(cl->q_) > 0);
1454 ifd->na_[cl->pri_]--;
1458 rmc_dropall(struct rm_class *cl)
1460 struct rm_ifdat *ifd = cl->ifdat_;
1462 if (!qempty(cl->q_)) {
1465 ifd->na_[cl->pri_]--;
1471 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1472 * delay action routine. It is invoked via rmc_under_limit when the
1473 * packet is discovered to be overlimit.
1475 * If the delay action is the result of the borrow class being overlimit, then
1476 * delay for the offtime of the borrowing class that is overlimit.
1482 rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1484 int delay, t, extradelay;
1486 cl->stats_.overactions++;
1487 TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1488 #ifndef BORROW_OFFTIME
1489 delay += cl->offtime_;
1492 if (!cl->sleeping_) {
1493 CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1494 #ifdef BORROW_OFFTIME
1496 extradelay = borrow->offtime_;
1499 extradelay = cl->offtime_;
1503 * XXX recalculate suspend time:
1504 * current undertime is (tidle + pkt_time) calculated
1505 * from the last transmission.
1506 * tidle: time required to bring avgidle back to 0
1507 * pkt_time: target waiting time for this class
1508 * we need to replace pkt_time by offtime
1510 extradelay -= cl->last_pkttime_;
1512 if (extradelay > 0) {
1513 TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1514 delay += extradelay;
1518 cl->stats_.delays++;
1521 * Since packets are phased randomly with respect to the
1522 * clock, 1 tick (the next clock tick) can be an arbitrarily
1523 * short time so we have to wait for at least two ticks.
1524 * NOTE: If there's no other traffic, we need the timer as
1525 * a 'backstop' to restart this class.
1527 if (delay > ustick * 2)
1528 t = (delay + ustick - 1) / ustick;
1531 callout_reset_bycpu(&cl->callout_, t, rmc_restart, cl, 0);
1537 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
1538 * called by the system timer code & is responsible for checking if the
1539 * class is still sleeping (it might have been restarted as a side
1540 * effect of the queue scan on a packet arrival) and, if so, restarting
1541 * output for the class. Inspecting the class state & restarting output
1542 * require locking the class structure. In general the driver is
1543 * responsible for locking but this is the only routine that is not
1544 * called directly or indirectly from the interface driver so it has to
1545 * know about system locking conventions. Under bsd, locking is done
1546 * by raising IPL to splimp so that's what's implemented here. On a
1547 * different system this would probably need to be changed.
1549 * Since this function is called from an independent timeout, we
1550 * have to set up the lock conditions expected for the ALTQ operation.
1551 * Note that the restart will probably fall through to an if_start.
1557 rmc_restart_dispatch(netmsg_t nmsg)
1559 struct rm_class *cl = nmsg->lmsg.u.ms_resultp;
1560 struct rm_ifdat *ifd = cl->ifdat_;
1561 struct ifaltq_subque *ifsq =
1562 &ifd->ifq_->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
1567 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */
1571 if (cl->sleeping_) {
1573 cl->undertime_.tv_sec = 0;
1575 if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1576 CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1577 (ifd->restart)(ifd->ifq_);
1580 ALTQ_SQ_UNLOCK(ifsq);
1584 rmc_restart(void *xcl)
1586 struct rm_class *cl = xcl;
1587 struct lwkt_msg *lmsg = &cl->callout_nmsg_.lmsg;
1589 KASSERT(mycpuid == 0, ("not on cpu0"));
1591 if (lmsg->ms_flags & MSGF_DONE)
1592 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg);
1598 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1599 * handling routine for the root class of the link sharing structure.
1605 rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
1607 panic("rmc_root_overlimit");
1611 * Packet Queue handling routines. Eventually, this is to localize the
1612 * effects on the code whether queues are red queues or droptail
1617 _rmc_addq(rm_class_t *cl, struct mbuf *m)
1620 if (q_is_rio(cl->q_))
1621 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1624 if (q_is_red(cl->q_))
1625 return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1626 #endif /* ALTQ_RED */
1628 if (cl->flags_ & RMCF_CLEARDSCP)
1629 write_dsfield(m, cl->pktattr_, 0);
1635 /* note: _rmc_dropq is not called for red */
1637 _rmc_dropq(rm_class_t *cl)
1641 if ((m = _getq(cl->q_)) != NULL)
1645 static struct mbuf *
1646 _rmc_getq(rm_class_t *cl)
1649 if (q_is_rio(cl->q_))
1650 return rio_getq((rio_t *)cl->red_, cl->q_);
1653 if (q_is_red(cl->q_))
1654 return red_getq(cl->red_, cl->q_);
1656 return _getq(cl->q_);
1659 static struct mbuf *
1660 _rmc_pollq(rm_class_t *cl)
1662 return qhead(cl->q_);
1667 * DDB hook to trace cbq events:
1668 * the last 1024 events are held in a circular buffer.
1669 * use "call cbqtrace_dump(N)" to display 20 events starting from the Nth event.
1671 void cbqtrace_dump(int);
1672 static char *rmc_funcname(void *);
1674 static struct rmc_funcs {
1678 rmc_init, "rmc_init",
1679 rmc_queue_packet, "rmc_queue_packet",
1680 rmc_under_limit, "rmc_under_limit",
1681 rmc_update_class_util, "rmc_update_class_util",
1682 rmc_delay_action, "rmc_delay_action",
1683 rmc_restart, "rmc_restart",
1684 _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next",
1689 rmc_funcname(void *func)
1691 struct rmc_funcs *fp;
1693 for (fp = rmc_funcs; fp->func != NULL; fp++) {
1694 if (fp->func == func)
1702 cbqtrace_dump(int counter)
1707 counter = counter % NCBQTRACE;
1708 p = (int *)&cbqtrace_buffer[counter];
1710 for (i=0; i<20; i++) {
1711 kprintf("[0x%x] ", *p++);
1712 kprintf("%s: ", rmc_funcname((void *)*p++));
1714 kprintf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
1715 kprintf("%d\n",*p++);
1717 if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
1718 p = (int *)cbqtrace_buffer;
1721 #endif /* CBQ_TRACE */
1722 #endif /* ALTQ_CBQ */