/*	$KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
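/*
 * background for readers (a sketch, not part of the original sources):
 * each service curve below is a two-piece linear function (m1, d, m2),
 * slope m1 for the first d milliseconds of a backlogged period and
 * slope m2 afterwards.  with purely illustrative numbers, a curve of
 * (m1 = 10Mbps, d = 20ms, m2 = 1Mbps) serves a freshly backlogged
 * class at 10Mbps for 20ms and at 1Mbps thereafter; m1 > m2 makes the
 * curve concave (front-loaded burst), m1 < m2 makes it convex (delayed
 * service).  three such curves can be attached to a class: rsc
 * (real-time), fsc (link-sharing) and usc (upper limit).
 */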
#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>

#include <sys/thread2.h>
#define HFSC_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define HFSC_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])
#define HFSC_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq_subque *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
					    struct service_curve *,
					    struct service_curve *,
					    struct service_curve *,
					    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq_subque *, struct mbuf *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, uint64_t);
static ellist_t *ellist_alloc(void);
static void	ellist_destroy(ellist_t *);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
static struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t);
static actlist_t *actlist_alloc(void);
static void	actlist_destroy(actlist_t *);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t);

static __inline uint64_t	seg_x2y(uint64_t, uint64_t);
static __inline uint64_t	seg_y2x(uint64_t, uint64_t);
static __inline uint64_t	m2sm(u_int);
static __inline uint64_t	m2ism(u_int);
static __inline uint64_t	d2dx(u_int);
static u_int	sm2m(uint64_t);
static u_int	dx2d(uint64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
			  uint64_t, uint64_t);
static uint64_t	rtsc_y2x(struct runtime_sc *, uint64_t);
static uint64_t	rtsc_x2y(struct runtime_sc *, uint64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
			 uint64_t, uint64_t);

static void	get_class_stats(struct hfsc_classstats *, struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t);

/*
 * macros
 */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */
int
hfsc_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_HFSC, a->altq_disc, ifq_mapsubq_default,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
}
int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ifq_is_ready(&ifp->if_snd))
		return (ENODEV);

	hif = kmalloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO);

	hif->hif_eligible = ellist_alloc();
	hif->hif_ifq = &ifp->if_snd;
	ifq_purge_all(&ifp->if_snd);

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}
int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	hfsc_clear_interface(hif);
	hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	kfree(hif, M_ALTQ);

	return (0);
}
static int
hfsc_add_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	KKASSERT(a->qid != 0);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE && hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit,
			       opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}
int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_add_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}
static int
hfsc_remove_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}
int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_remove_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}
int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	crit_enter();

	if ((cl = clh_to_clp(hif, a->qid)) == NULL) {
		crit_exit();
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	crit_exit();

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}
/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if (hif->hif_rootclass == NULL)
		return (0);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}
static int
hfsc_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == HFSC_SUBQ_INDEX) {
			hfsc_purge(hif);
		} else {
			/*
			 * Race happened, the unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}
/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	}
	if (ifq_is_enabled(hif->hif_ifq))
		hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX].ifq_len = 0;
}
static struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
		  struct service_curve *fsc, struct service_curve *usc,
		  struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_actc = actlist_alloc();

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = kmalloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = kmalloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = kmalloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	crit_enter();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL) {
		hif->hif_class_tbl[i] = cl;
	} else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++) {
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == HFSC_MAX_CLASSES) {
			crit_exit();
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else if (parent->cl_children == NULL) {
		/* add this class to the children list of the parent */
		parent->cl_children = cl;
	} else {
		p = parent->cl_children;
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	crit_exit();

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_q != NULL)
		kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);
	return (NULL);
}
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	struct hfsc_if *hif;
	int i;

	if (cl == NULL)
		return (0);
	hif = cl->cl_hif;

	if (is_a_parent_class(cl))
		return (EBUSY);

	crit_enter();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl) {
			cl->cl_parent->cl_children = cl->cl_siblings;
		} else {
			do {
				if (p->cl_siblings == cl) {
					p->cl_siblings = cl->cl_siblings;
					break;
				}
			} while ((p = p->cl_siblings) != NULL);
		}
		KKASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if (hif->hif_class_tbl[i] == cl) {
			hif->hif_class_tbl[i] = NULL;
			break;
		}
	}

	hif->hif_classes--;
	crit_exit();

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == hif->hif_rootclass)
		hif->hif_rootclass = NULL;
	if (cl == hif->hif_defaultclass)
		hif->hif_defaultclass = NULL;
	if (cl == hif->hif_pollcache)
		hif->hif_pollcache = NULL;

	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);

	return (0);
}
/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL) {
		cl = cl->cl_children;
	} else if (cl->cl_siblings != NULL) {
		cl = cl->cl_siblings;
	} else {
		while ((cl = cl->cl_parent) != NULL) {
			if (cl->cl_siblings != NULL) {
				cl = cl->cl_siblings;
				break;
			}
		}
	}

	return (cl);
}
/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
	     struct altq_pktattr *pktattr)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return (ENOBUFS);
	}

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		return (ENOBUFS);
	}
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(hif, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	ifsq->ifq_len++;
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}
/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
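/*
 * a minimal sketch of the intended caller pattern (hypothetical driver
 * code; driver_can_xmit() is a made-up placeholder, not a real API):
 *
 *	m = hfsc_dequeue(ifsq, NULL, ALTDQ_POLL);	 (peek only)
 *	if (m != NULL && driver_can_xmit(ifp, m))
 *		m = hfsc_dequeue(ifsq, m, ALTDQ_REMOVE); (same mbuf back)
 *
 * the REMOVE that immediately follows a POLL must yield the polled
 * mbuf; the poll cache below tries to guarantee this, and the SMP
 * caveat in the function body explains where that breaks down.
 */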
static struct mbuf *
hfsc_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time)) != NULL) {
			realtime = 1;
		} else {
			int fits = 0;

			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						kprintf("%d fit but none found\n", fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
				fits++;
			}
		}

		if (op == ALTDQ_POLL) {
			/*
			 * XXX
			 * Don't use poll cache; the poll/dequeue
			 * model is no longer applicable to SMP
			 * system.  e.g.
			 *    CPU-A            CPU-B
			 *      :                :
			 *    poll               :
			 *      :              poll
			 *    dequeue (+)        :
			 *
			 * The dequeue at (+) will hit the poll
			 * cache set by CPU-B.
			 */
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ifsq->ifq_len--;
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	KKASSERT(mpolled == NULL || m == mpolled);

	return (m);
}
static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}
static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}
static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		cl->cl_hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX].ifq_len--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}
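/*
 * worked illustration (made-up numbers): for a concave rsc such as
 * m1 = 10Mbps, m2 = 1Mbps, the eligible curve keeps both pieces of
 * the deadline curve, so a newly backlogged class becomes eligible
 * early enough to claim its m1 burst.  for a convex rsc (sm1 <= sm2)
 * the dx/dy knee is zeroed above, leaving a single segment of slope
 * sm2 through the same origin: eligibility then accrues only at the
 * long-term rate, and the steeper m2 piece shows up solely in the
 * deadline curve.
 */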
static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;
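			/*
			 * numeric illustration (made-up values): if the
			 * parent's backlogged children currently span
			 * cl_cvtmin = 100 and max_cl->cl_vt = 300, the
			 * re-activated class starts at vt = (100 + 300)/2
			 * = 200, i.e. in the middle of the pack, so it is
			 * neither unfairly favored by a too-small vt nor
			 * starved by a too-large one.
			 */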
			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;
			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		/* compute f */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}
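		/*
		 * e.g. (illustrative): if the siblings advanced the
		 * parent's cvtmin to 500 while this class was passed
		 * over, but its own curve now yields vt = 420, the
		 * 80-unit shortfall is folded into cl_vtadj above so
		 * the class resumes at vt = 500 instead of replaying
		 * service it never had a chance to use.
		 */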
		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}
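		/*
		 * e.g. (illustrative): with machclk_per_tick = 10000,
		 * a class that idled until cl_myf = cur_time - 50000
		 * would otherwise be owed five ticks of backlogged
		 * credit at once; the adjustment above slides both myf
		 * and myfadj forward by delta = 50000, so the upper
		 * limit is enforced from now on rather than
		 * retroactively.
		 */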
		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}
/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */

/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */
static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	kfree(head, M_ALTQ);
}
static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}
static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}
static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	KKASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}
/* find the class with the minimum deadline among the eligible classes */
static struct hfsc_class *
ellist_get_mindl(ellist_t *head, uint64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}
/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	kfree(head, M_ALTQ);
}
static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}
static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}
static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}
static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}
/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps      10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6     125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6     250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6     625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000       8000       800        80         8
 *  ism(500MHz) 40000       4000       400        40         4
 *  ism(200MHz) 16000       1600       160        16         1.6
 */
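/*
 * worked example (assuming machclk_freq = 500MHz and the SM_SHIFT/
 * ISM_SHIFT values defined below):
 *
 *	m   = 10Mbps = 1250000 bytes/sec
 *	sm  = m2sm(10000000)  = (10000000 << 24) / 8 / 500000000
 *	    = 41943	(2500e-6 bytes/cycle scaled by 2^24)
 *	ism = m2ism(10000000) = (500000000 << 10) * 8 / 10000000
 *	    = 409600	(400 cycles/byte scaled by 2^10)
 *
 * both values keep at least 3 significant digits after scaling, as in
 * the table above; without the shifts the integer slopes would round
 * to 0 and 400 respectively.
 */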
#define SM_SHIFT	24
#define ISM_SHIFT	10

#define SM_MASK		((1LL << SM_SHIFT) - 1)
#define ISM_MASK	((1LL << ISM_SHIFT) - 1)
static __inline uint64_t
seg_x2y(uint64_t x, uint64_t sm)
{
	uint64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}
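/*
 * why the split above is safe (sketch): x may be a full 64-bit machine
 * clock count, so a direct x * sm product could overflow uint64_t.
 * splitting x into its high bits (x >> SM_SHIFT) and its low bits
 * (x & SM_MASK) keeps each partial product in range: the low part is
 * below 2^24, and sm for any realistic link speed is far smaller than
 * 2^40, so (x & SM_MASK) * sm cannot wrap.  seg_y2x() below applies
 * the same trick with ism and ISM_MASK.
 */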
static __inline uint64_t
seg_y2x(uint64_t y, uint64_t ism)
{
	uint64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else
		x = (y >> ISM_SHIFT) * ism + (((y & ISM_MASK) * ism) >> ISM_SHIFT);

	return (x);
}
static __inline uint64_t
m2sm(u_int m)
{
	uint64_t sm;

	sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline uint64_t
m2ism(u_int m)
{
	uint64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline uint64_t
d2dx(u_int d)
{
	uint64_t dx;

	dx = ((uint64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(uint64_t sm)
{
	uint64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(uint64_t dx)
{
	uint64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}
static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
/*
 * calculate the x-projection of the runtime service curve by the
 * given y-projection value
 */
static uint64_t
rtsc_y2x(struct runtime_sc *rtsc, uint64_t y)
{
	uint64_t x;

	if (y < rtsc->y) {
		x = rtsc->x;
	} else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}
static uint64_t
rtsc_x2y(struct runtime_sc *rtsc, uint64_t x)
{
	uint64_t y;

	if (x <= rtsc->x) {
		y = rtsc->y;
	} else if (x <= rtsc->x + rtsc->dx) {
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	} else {
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	}
	return (y);
}
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
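/*
 * derivation of dx above (concave case, sm1 > sm2): the new curve
 * starts at (x, y) with slope sm1 while the current rtsc passes
 * through the higher point (x, y1) at slope sm2, so the curves meet
 * where
 *
 *	y + seg_x2y(dx, sm1) == y1 + seg_x2y(dx, sm2)
 *   => (dx * sm1) >> SM_SHIFT == (y1 - y) + ((dx * sm2) >> SM_SHIFT)
 *   => dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * the "rtsc->x + rtsc->dx > x" correction handles (x, y1) still lying
 * on the old curve's first (slope-sm1) segment, which pushes the
 * crossing right by the remaining length of that segment.
 */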
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}
/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, uint32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}
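/*
 * e.g. (illustrative, assuming HFSC_MAX_CLASSES is a power of two
 * such as 64): handle 0x41 probes slot 1 first; if another class
 * whose qid shares the low bits (say 0x01) got there first,
 * hfsc_class_create() fell back to the first free slot, so the
 * linear scan above remains necessary for correctness and the
 * modulo probe is purely a fast path.
 */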
#endif /* ALTQ_HFSC */