/*	$KAME: altq_subr.c,v 1.23 2004/04/20 16:10:06 itojun Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_subr.c,v 1.12 2008/05/14 11:59:23 sephe Exp $ */

/*
 * Copyright (C) 1997-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/thread2.h>

#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>

/* machine dependent clock related includes */
#include <machine/clock.h>		/* for tsc_frequency */
#include <machine/md_var.h>		/* for cpu_feature */
#include <machine/specialreg.h>		/* for CPUID_TSC */
/*
 * internal function prototypes
 */
static void	tbr_timeout(void *);
static void	tbr_timeout_dispatch(netmsg_t);
static int	altq_enable_locked(struct ifaltq *);
static int	altq_disable_locked(struct ifaltq *);
static int	altq_detach_locked(struct ifaltq *);
static int	tbr_set_locked(struct ifaltq *, struct tb_profile *);

int (*altq_input)(struct mbuf *, int) = NULL;
static int tbr_timer = 0;		/* token bucket regulator timer */
static struct callout tbr_callout;
static struct netmsg_base tbr_timeout_netmsg;

int pfaltq_running;			/* keep track of running state */

MALLOC_DEFINE(M_ALTQ, "altq", "ALTQ structures");
/*
 * alternate queueing support routines
 */

/* look up the queue state by the interface name and the queueing type. */
altq_lookup(const char *name, int type)
        if ((ifp = ifunit(name)) != NULL) {
                if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
                        return (ifp->if_snd.altq_disc);

altq_attach(struct ifaltq *ifq, int type, void *discipline,
    altq_mapsubq_t mapsubq,
    ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request,
    void *clfier,
    void *(*classify)(struct ifaltq *, struct mbuf *, struct altq_pktattr *))
        if (!ifq_is_ready(ifq))

        ifq->altq_type = type;
        ifq->altq_disc = discipline;
        ifq->altq_clfier = clfier;
        ifq->altq_classify = classify;
        ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
        ifq_set_methods(ifq, mapsubq, enqueue, dequeue, request);
altq_detach_locked(struct ifaltq *ifq)
        if (!ifq_is_ready(ifq))
        if (ifq_is_enabled(ifq))
        if (!ifq_is_attached(ifq))

        ifq_set_classic(ifq);
        ifq->altq_type = ALTQT_NONE;
        ifq->altq_disc = NULL;
        ifq->altq_clfier = NULL;
        ifq->altq_classify = NULL;
        ifq->altq_flags &= ALTQF_CANTCHANGE;

altq_detach(struct ifaltq *ifq)
        error = altq_detach_locked(ifq);

altq_enable_locked(struct ifaltq *ifq)
        if (!ifq_is_ready(ifq))
        if (ifq_is_enabled(ifq))

        ifq_purge_all_locked(ifq);

        ifq->altq_flags |= ALTQF_ENABLED;
        if (ifq->altq_clfier != NULL)
                ifq->altq_flags |= ALTQF_CLASSIFY;

altq_enable(struct ifaltq *ifq)
        error = altq_enable_locked(ifq);
altq_disable_locked(struct ifaltq *ifq)
        if (!ifq_is_enabled(ifq))

        ifq_purge_all_locked(ifq);
        ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);

altq_disable(struct ifaltq *ifq)
        error = altq_disable_locked(ifq);
/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
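
/*
 * Illustrative example (numbers assumed, not taken from this file): for a
 * profile rate of 100Mbps and a 2GHz machine clock, tbr_set_locked() below
 * computes
 *
 *	tbr_rate = TBR_SCALE(100000000 / 8) / 2000000000
 *	         = (12500000 << 32) / 2000000000
 *	         ~= 26843545
 *
 * i.e. about 0.00625 bytes of credit per machine clock tick once unscaled,
 * and TBR_UNSCALE(tbr_rate * 8 * machclk_freq) in tbr_get() recovers
 * roughly the original 100Mbps.
 */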
tbr_dequeue(struct ifaltq_subque *ifsq, int op)
        struct ifaltq *ifq = ifsq->ifsq_altq;
        struct tb_regulator *tbr;

        if (ifsq_get_index(ifsq) != ALTQ_SUBQ_INDEX_DEFAULT) {
                /*
                 * Race happened, the unrelated subqueue was
                 * picked during the packet scheduler transition.
                 */
                ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);

        if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
                /* if this is a remove after poll, bypass tbr check */
        } else {
                /* update token only when it is negative */
                if (tbr->tbr_token <= 0) {
                        now = read_machclk();
                        interval = now - tbr->tbr_last;
                        if (interval >= tbr->tbr_filluptime)
                                tbr->tbr_token = tbr->tbr_depth;
                        else {
                                tbr->tbr_token += interval * tbr->tbr_rate;
                                if (tbr->tbr_token > tbr->tbr_depth)
                                        tbr->tbr_token = tbr->tbr_depth;
                        }
                }
                /* if token is still negative, don't allow dequeue */
                if (tbr->tbr_token <= 0) {

        if (ifq_is_enabled(ifq))
                m = (*ifsq->ifsq_dequeue)(ifsq, op);
        else
                m = ifsq_classic_dequeue(ifsq, op);

        if (m != NULL && op == ALTDQ_REMOVE)
                tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
        tbr->tbr_lastop = op;
/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
tbr_set_locked(struct ifaltq *ifq, struct tb_profile *profile)
        struct tb_regulator *tbr, *otbr;

        if (machclk_freq == 0)
                init_machclk();
        if (machclk_freq == 0) {
                kprintf("%s: no cpu clock available!\n", __func__);

        if (profile->rate == 0) {
                /* delete this tbr */
                if ((tbr = ifq->altq_tbr) == NULL)
                ifq->altq_tbr = NULL;

        tbr = kmalloc(sizeof(*tbr), M_ALTQ, M_WAITOK | M_ZERO);
        tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
        tbr->tbr_depth = TBR_SCALE(profile->depth);
        if (tbr->tbr_rate > 0)
                /* machine clock ticks needed to refill an empty bucket */
                tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
        else
                tbr->tbr_filluptime = 0xffffffffffffffffLL;
        tbr->tbr_token = tbr->tbr_depth;
        tbr->tbr_last = read_machclk();
        tbr->tbr_lastop = ALTDQ_REMOVE;

        otbr = ifq->altq_tbr;
        ifq->altq_tbr = tbr;	/* set the new tbr */

        else if (tbr_timer == 0) {
                callout_reset_bycpu(&tbr_callout, 1, tbr_timeout, NULL, 0);

tbr_set(struct ifaltq *ifq, struct tb_profile *profile)
        error = tbr_set_locked(ifq, profile);
tbr_timeout(void *arg __unused)
        struct lwkt_msg *lmsg = &tbr_timeout_netmsg.lmsg;

        KASSERT(mycpuid == 0, ("not on cpu0"));

        if (lmsg->ms_flags & MSGF_DONE)
                lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg);

/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 */
tbr_timeout_dispatch(netmsg_t nmsg)
        const struct ifnet_array *arr;

        KASSERT(&curthread->td_msgport == netisr_cpuport(0),

        lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */

        arr = ifnet_array_get();
        for (i = 0; i < arr->ifnet_count; ++i) {
                struct ifnet *ifp = arr->ifnet_arr[i];
                struct ifaltq_subque *ifsq;

                if (ifp->if_snd.altq_tbr == NULL)
                        continue;

                ifsq = &ifp->if_snd.altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
                if (!ifsq_is_empty(ifsq) && ifp->if_start != NULL) {
                        ifsq_serialize_hw(ifsq);
                        (*ifp->if_start)(ifp, ifsq);
                        ifsq_deserialize_hw(ifsq);

        callout_reset(&tbr_callout, 1, tbr_timeout, NULL);

        tbr_timer = 0;	/* don't need tbr_timer anymore */
/*
 * get token bucket regulator profile
 */
tbr_get(struct ifaltq *ifq, struct tb_profile *profile)
        struct tb_regulator *tbr;

        if ((tbr = ifq->altq_tbr) == NULL) {

        profile->rate =
            (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
        profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 */
altq_pfattach(struct pf_altq *a)
        if (a->scheduler == ALTQT_NONE)
        if (a->altq_disc == NULL)
        ifp = ifunit(a->ifname);

        switch (a->scheduler) {
                error = cbq_pfattach(a, ifq);
                error = priq_pfattach(a, ifq);
                error = hfsc_pfattach(a, ifq);
                error = fairq_pfattach(a, ifq);

        /* if the state is running, enable altq */
        if (error == 0 && pfaltq_running && ifq->altq_type != ALTQT_NONE &&
            !ifq_is_enabled(ifq))
                error = altq_enable_locked(ifq);

        /* if altq is already enabled, reset the token bucket regulator */
        if (error == 0 && ifq_is_enabled(ifq)) {
                struct tb_profile tb;

                tb.rate = a->ifbandwidth;
                tb.depth = a->tbrsize;
                error = tbr_set_locked(ifq, &tb);
/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 */
altq_pfdetach(struct pf_altq *a)
        ifp = ifunit(a->ifname);

        /* if this discipline is no longer referenced, just return */
        if (a->altq_disc == NULL) {

        if (a->altq_disc != ifq->altq_disc)

        if (ifq_is_enabled(ifq))
                error = altq_disable_locked(ifq);
        if (error == 0)
                error = altq_detach_locked(ifq);
/*
 * add a discipline or a queue
 */
altq_add(struct pf_altq *a)
        if (a->qname[0] != 0)
                return (altq_add_queue(a));

        if (machclk_freq == 0)
                init_machclk();
        if (machclk_freq == 0)
                panic("altq_add: no cpu clock");

        switch (a->scheduler) {
                error = cbq_add_altq(a);
                error = priq_add_altq(a);
                error = hfsc_add_altq(a);
                error = fairq_add_altq(a);
/*
 * remove a discipline or a queue
 */
altq_remove(struct pf_altq *a)
        if (a->qname[0] != 0)
                return (altq_remove_queue(a));

        switch (a->scheduler) {
                error = cbq_remove_altq(a);
                error = priq_remove_altq(a);
                error = hfsc_remove_altq(a);
                error = fairq_remove_altq(a);
/*
 * add a queue to the discipline
 */
altq_add_queue(struct pf_altq *a)
        switch (a->scheduler) {
                error = cbq_add_queue(a);
                error = priq_add_queue(a);
                error = hfsc_add_queue(a);
                error = fairq_add_queue(a);
/*
 * remove a queue from the discipline
 */
altq_remove_queue(struct pf_altq *a)
        switch (a->scheduler) {
                error = cbq_remove_queue(a);
                error = priq_remove_queue(a);
                error = hfsc_remove_queue(a);
                error = fairq_remove_queue(a);
/*
 * get queue statistics
 */
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
        switch (a->scheduler) {
                error = cbq_getqstats(a, ubuf, nbytes);
                error = priq_getqstats(a, ubuf, nbytes);
                error = hfsc_getqstats(a, ubuf, nbytes);
                error = fairq_getqstats(a, ubuf, nbytes);
/*
 * read and write diffserv field in IPv4 or IPv6 header
 */
read_dsfield(struct mbuf *m, struct altq_pktattr *pktattr)
        uint8_t ds_field = 0;

        if (pktattr == NULL ||
            (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))

        /* verify that pattr_hdr is within the mbuf data */
        for (m0 = m; m0 != NULL; m0 = m0->m_next) {
                if ((pktattr->pattr_hdr >= m0->m_data) &&
                    (pktattr->pattr_hdr < m0->m_data + m0->m_len))

        /* ick, pattr_hdr is stale */
        pktattr->pattr_af = AF_UNSPEC;
        kprintf("read_dsfield: can't locate header!\n");

        if (pktattr->pattr_af == AF_INET) {
                struct ip *ip = (struct ip *)pktattr->pattr_hdr;

                if (ip->ip_v != 4)
                        return ((uint8_t)0);	/* version mismatch! */
                ds_field = ip->ip_tos;
        } else if (pktattr->pattr_af == AF_INET6) {
                struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;

                flowlabel = ntohl(ip6->ip6_flow);
                if ((flowlabel >> 28) != 6)
                        return ((uint8_t)0);	/* version mismatch! */
                ds_field = (flowlabel >> 20) & 0xff;
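                /*
                 * In the host-order ip6_flow word the 8-bit traffic class
                 * (DS field) occupies bits 20-27, between the 4-bit version
                 * field and the 20-bit flow label, hence the
                 * (flowlabel >> 20) & 0xff extraction above.
                 */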
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, uint8_t dsfield)
        if (pktattr == NULL ||
            (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))

        /* verify that pattr_hdr is within the mbuf data */
        for (m0 = m; m0 != NULL; m0 = m0->m_next) {
                if ((pktattr->pattr_hdr >= m0->m_data) &&
                    (pktattr->pattr_hdr < m0->m_data + m0->m_len))

        /* ick, pattr_hdr is stale */
        pktattr->pattr_af = AF_UNSPEC;
        kprintf("write_dsfield: can't locate header!\n");

        if (pktattr->pattr_af == AF_INET) {
                struct ip *ip = (struct ip *)pktattr->pattr_hdr;

                if (ip->ip_v != 4)
                        return;		/* version mismatch! */
                old = ip->ip_tos;
                dsfield |= old & 3;	/* leave CU bits */
                ip->ip_tos = dsfield;
                /*
                 * update checksum (from RFC1624)
                 *	HC' = ~(~HC + ~m + m')
                 */
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += 0xff00 + (~old & 0xff) + dsfield;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16);	/* add carry */

                ip->ip_sum = htons(~sum & 0xffff);
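                /*
                 * The 16-bit word being rewritten above is the pair
                 * (version/header-length byte, ip_tos).  The high byte does
                 * not change, so the RFC 1624 term ~m + m' reduces to
                 * 0xff00 + (~old & 0xff) + dsfield, which is exactly what
                 * is folded into the one's complement sum here.
                 */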
        } else if (pktattr->pattr_af == AF_INET6) {
                struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;

                flowlabel = ntohl(ip6->ip6_flow);
                if ((flowlabel >> 28) != 6)
                        return;		/* version mismatch! */
                flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
                ip6->ip6_flow = htonl(flowlabel);
/*
 * high resolution clock support taking advantage of a machine dependent
 * high resolution time counter (e.g., timestamp counter of Intel Pentium).
 * we assume
 *  - 64-bit-long monotonically-increasing counter
 *  - frequency range is 100M-4GHz (CPU speed)
 */
/* if pcc is not available or disabled, emulate 256MHz using microtime() */
#define	MACHCLK_SHIFT	8
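
/*
 * With MACHCLK_SHIFT of 8 the emulated clock advances
 * 1000000 << 8 = 256,000,000 ticks per second (the "256MHz" mentioned
 * above); see the machclk_freq assignment in init_machclk() and the
 * microtime()-based fallback in read_machclk() below.
 */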
static int machclk_usepcc;
uint64_t machclk_freq = 0;
uint32_t machclk_per_tick = 0;

init_machclk(void)
        callout_init_mp(&tbr_callout);
        netmsg_init(&tbr_timeout_netmsg, NULL, &netisr_adone_rport,
            MSGF_PRIORITY, tbr_timeout_dispatch);

#if defined(__i386__) || defined(__x86_64__)

        if (!machclk_usepcc) {
                /* emulate 256MHz using microtime() */
                machclk_freq = 1000000LLU << MACHCLK_SHIFT;
                machclk_per_tick = machclk_freq / hz;
                kprintf("altq: emulate %juHz cpu clock\n",
                    (uintmax_t)machclk_freq);
        /*
         * If the clock frequency (of Pentium TSC) is accessible,
         * just use it.
         */
#ifdef _RDTSC_SUPPORTED_
        if (cpu_feature & CPUID_TSC)
                machclk_freq = (uint64_t)tsc_frequency;

        /*
         * If we don't know the clock frequency, measure it.
         */
        if (machclk_freq == 0) {
                struct timeval tv_start, tv_end;
                uint64_t start, end, diff;

                microtime(&tv_start);
                start = read_machclk();
                timo = hz;	/* 1 sec */
                tsleep(&wait, PCATCH, "init_machclk", timo);
                microtime(&tv_end);
                end = read_machclk();
                diff = (uint64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
                    + tv_end.tv_usec - tv_start.tv_usec;
                machclk_freq = (end - start) * 1000000 / diff;

        machclk_per_tick = machclk_freq / hz;

        kprintf("altq: CPU clock: %juHz\n", (uintmax_t)machclk_freq);
        if (machclk_usepcc) {
#ifdef _RDTSC_SUPPORTED_
                val = rdtsc();
#else
                panic("read_machclk");
#endif
        } else {
                val = (((uint64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
                    + tv.tv_usec) << MACHCLK_SHIFT);