1 /* $KAME: altq_cbq.c,v 1.20 2004/04/17 10:54:48 kjc Exp $ */
2 /* $DragonFly: src/sys/net/altq/altq_cbq.c,v 1.7 2008/05/14 11:59:23 sephe Exp $ */
5 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the SMCC Technology
21 * Development Group at Sun Microsystems, Inc.
23 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
24 * promote products derived from this software without specific prior
27 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
28 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE. The software is
29 * provided "as is" without express or implied warranty of any kind.
31 * These notices must be retained in any copies of any part of this software.
36 #include "opt_inet6.h"
38 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
40 #include <sys/param.h>
41 #include <sys/malloc.h>
43 #include <sys/socket.h>
44 #include <sys/systm.h>
46 #include <sys/callout.h>
47 #include <sys/errno.h>
49 #include <sys/thread.h>
52 #include <net/ifq_var.h>
53 #include <netinet/in.h>
55 #include <net/pf/pfvar.h>
56 #include <net/altq/altq.h>
57 #include <net/altq/altq_cbq.h>
59 #include <sys/thread2.h>
/*
 * CBQ runs on a single subqueue (the default ALTQ subqueue); all
 * locking helpers below delegate to that subqueue's ALTQ_SQ_* lock ops.
 */
61 #define CBQ_SUBQ_INDEX ALTQ_SUBQ_INDEX_DEFAULT
62 #define CBQ_LOCK(ifq) \
63 ALTQ_SQ_LOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
64 #define CBQ_UNLOCK(ifq) \
65 ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
66 #define CBQ_ASSERT_LOCKED(ifq) \
67 ALTQ_SQ_ASSERT_LOCKED(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
70 * Forward Declarations.
72 static int cbq_class_destroy(cbq_state_t *, struct rm_class *);
73 static struct rm_class *clh_to_clp(cbq_state_t *, uint32_t);
74 static int cbq_clear_interface(cbq_state_t *);
75 static int cbq_request(struct ifaltq_subque *, int, void *);
76 static int cbq_enqueue(struct ifaltq_subque *, struct mbuf *,
77 struct altq_pktattr *);
78 static struct mbuf *cbq_dequeue(struct ifaltq_subque *, struct mbuf *,
80 static void cbqrestart(struct ifaltq *);
81 static void get_class_stats(class_stats_t *, struct rm_class *);
82 static void cbq_purge(cbq_state_t *);
86 * cbq_class_destroy(cbq_mod_state_t *, struct rm_class *) - This
87 * function destroys a given traffic class. Before destroying
88 * the class, all traffic for that class is released.
/*
 * cbq_class_destroy(): tear down one traffic class.
 * Deletes the class from the rm_class machinery, clears every class-table
 * slot that still points at it, and drops the cached root_/default_
 * pointers if they referenced this class.
 * NOTE(review): this extract is missing lines (declarations, braces and
 * the return statement) -- do not treat the fragment as compilable.
 */
91 cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
95 	/* delete the class */
96 	rmc_delete_class(&cbqp->ifnp, cl);
99 	 * free the class handle
101 	for (i = 0; i < CBQ_MAX_CLASSES; i++)
102 		if (cbqp->cbq_class_tbl[i] == cl)
103 			cbqp->cbq_class_tbl[i] = NULL;
/* forget cached shortcuts that referenced the destroyed class */
105 	if (cl == cbqp->ifnp.root_)
106 		cbqp->ifnp.root_ = NULL;
107 	if (cl == cbqp->ifnp.default_)
108 		cbqp->ifnp.default_ = NULL;
112 /* convert class handle to class pointer */
/*
 * clh_to_clp(): map a user-visible class handle (qid) to the rm_class.
 * Tries the hashed slot (handle mod CBQ_MAX_CLASSES) first, then falls
 * back to a linear scan of the whole class table.
 * NOTE(review): the return statements are missing from this extract;
 * presumably the fast-path hit and scan hit return cl, otherwise NULL.
 */
113 static struct rm_class *
114 clh_to_clp(cbq_state_t *cbqp, uint32_t chandle)
122 	 * first, try optimistically the slot matching the lower bits of
123 	 * the handle. if it fails, do the linear table search.
125 	i = chandle % CBQ_MAX_CLASSES;
126 	if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
127 	    cl->stats_.handle == chandle)
129 	for (i = 0; i < CBQ_MAX_CLASSES; i++)
130 		if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
131 		    cl->stats_.handle == chandle)
/*
 * cbq_clear_interface(): destroy every leaf class attached to this CBQ
 * instance, clearing the table slot and the cached root_/default_
 * pointers as each class goes away.
 * NOTE(review): lines are missing here -- the is_a_parent_class() branch
 * presumably skips (or defers) parent classes so children die first;
 * confirm against the full source.
 */
137 cbq_clear_interface(cbq_state_t *cbqp)
142 	/* clear out the classes now */
145 	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
146 		if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
147 			if (is_a_parent_class(cl))
150 			cbq_class_destroy(cbqp, cl);
151 			cbqp->cbq_class_tbl[i] = NULL;
152 			if (cl == cbqp->ifnp.root_)
153 				cbqp->ifnp.root_ = NULL;
154 			if (cl == cbqp->ifnp.default_)
155 				cbqp->ifnp.default_ = NULL;
/*
 * cbq_request(): ALTQ request hook (e.g. ALTRQ_PURGE) for the CBQ
 * discipline.  Requests on the CBQ subqueue are handled by the CBQ
 * state; a request arriving on an unrelated subqueue (a race during a
 * scheduler switch) is handed to the classic handler instead.
 */
165 cbq_request(struct ifaltq_subque *ifsq, int req, void *arg)
167 	struct ifaltq *ifq = ifsq->ifsq_altq;
168 	cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
173 	if (ifsq_get_index(ifsq) == CBQ_SUBQ_INDEX) {
177 	 * Race happened, the unrelated subqueue was
178 	 * picked during the packet scheduler transition.
180 	ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
188 /* copy the stats info in rm_class to class_states_t */
/*
 * get_class_stats(): snapshot one class's counters and configuration
 * into the userland-visible class_stats_t.  Pure copy; no locking done
 * here (caller is expected to hold the ifq lock).
 * NOTE(review): the RED/RIO copies below are normally wrapped in
 * #ifdef ALTQ_RED / ALTQ_RIO guards that are missing from this extract.
 */
190 get_class_stats(class_stats_t *statsp, struct rm_class *cl)
192 	statsp->xmit_cnt = cl->stats_.xmit_cnt;
193 	statsp->drop_cnt = cl->stats_.drop_cnt;
194 	statsp->over = cl->stats_.over;
195 	statsp->borrows = cl->stats_.borrows;
196 	statsp->overactions = cl->stats_.overactions;
197 	statsp->delays = cl->stats_.delays;
/* link-sharing parameters and queue state */
199 	statsp->depth = cl->depth_;
200 	statsp->priority = cl->pri_;
201 	statsp->maxidle = cl->maxidle_;
202 	statsp->minidle = cl->minidle_;
203 	statsp->offtime = cl->offtime_;
204 	statsp->qmax = qlimit(cl->q_);
205 	statsp->ns_per_byte = cl->ns_per_byte_;
206 	statsp->wrr_allot = cl->w_allotment_;
207 	statsp->qcnt = qlen(cl->q_);
208 	statsp->avgidle = cl->avgidle_;
210 	statsp->qtype = qtype(cl->q_);
212 	if (q_is_red(cl->q_))
213 		red_getstats(cl->red_, &statsp->red[0]);
216 	if (q_is_rio(cl->q_))
217 		rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
/*
 * cbq_pfattach(): attach this CBQ instance (built earlier by
 * cbq_add_altq and stashed in a->altq_disc) to the interface's ifaltq,
 * wiring up the enqueue/dequeue/request entry points.
 */
222 cbq_pfattach(struct pf_altq *a, struct ifaltq *ifq)
224 	return altq_attach(ifq, ALTQT_CBQ, a->altq_disc, ifq_mapsubq_default,
225 	cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
/*
 * cbq_add_altq(): allocate and initialize the per-interface CBQ state
 * for pf(4).  Looks up the interface, requires its send queue to be
 * ALTQ-ready, zero-allocates the state, and purges any packets already
 * queued.  The state pointer is saved in the pf_altq record (the
 * "keep the state in pf_altq" assignment is missing from this extract).
 */
229 cbq_add_altq(struct pf_altq *a)
234 	if ((ifp = ifunit(a->ifname)) == NULL)
236 	if (!ifq_is_ready(&ifp->if_snd))
239 	/* allocate and initialize cbq_state_t */
240 	cbqp = kmalloc(sizeof(*cbqp), M_ALTQ, M_WAITOK | M_ZERO);
241 	callout_init(&cbqp->cbq_callout);
243 	cbqp->ifnp.ifq_ = &ifp->if_snd; /* keep the ifq */
244 	ifq_purge_all(&ifp->if_snd);
246 	/* keep the state in pf_altq */
/*
 * cbq_remove_altq(): destroy the CBQ discipline for this pf_altq
 * record.  Leaf classes go first (cbq_clear_interface), then the
 * default and root classes, then the cbq_state_t itself (the kfree is
 * missing from this extract).
 */
253 cbq_remove_altq(struct pf_altq *a)
257 	if ((cbqp = a->altq_disc) == NULL)
261 	cbq_clear_interface(cbqp);
/* default before root: root must outlive its children */
263 	if (cbqp->ifnp.default_)
264 		cbq_class_destroy(cbqp, cbqp->ifnp.default_);
265 	if (cbqp->ifnp.root_)
266 		cbq_class_destroy(cbqp, cbqp->ifnp.root_);
268 	/* deallocate cbq_state_t */
/*
 * cbq_add_queue_locked(): create one CBQ class from a pf_altq request.
 * Caller holds the CBQ lock and guarantees a->qid != 0.
 * Steps: pick a class-table slot, validate priority/parent/borrow,
 * validate the ROOTCLASS/DEFCLASS flag combination, then either
 * rmc_init() the interface root or rmc_newclass() an ordinary class,
 * record the qid as the class handle, and remember the default class.
 * NOTE(review): error-return lines are missing throughout this extract;
 * each failed check presumably returns EINVAL/EBUSY-style errors.
 */
275 cbq_add_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
277 	struct rm_class *borrow, *parent;
279 	struct cbq_opts *opts;
282 	KKASSERT(a->qid != 0);
285 	 * find a free slot in the class table. if the slot matching
286 	 * the lower bits of qid is free, use this slot. otherwise,
287 	 * use the first free slot.
289 	i = a->qid % CBQ_MAX_CLASSES;
290 	if (cbqp->cbq_class_tbl[i] != NULL) {
291 		for (i = 0; i < CBQ_MAX_CLASSES; i++)
292 			if (cbqp->cbq_class_tbl[i] == NULL)
294 		if (i == CBQ_MAX_CLASSES)
298 	opts = &a->pq_u.cbq_opts;
299 	/* check parameters */
300 	if (a->priority >= CBQ_MAXPRI)
303 	/* Get pointers to parent and borrow classes. */
304 	parent = clh_to_clp(cbqp, a->parent_qid)
305 	if (opts->flags & CBQCLF_BORROW)
311 	 * A class must borrow from it's parent or it can not
312 	 * borrow at all. Hence, borrow can be null.
314 	if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
315 		kprintf("cbq_add_queue: no parent class!\n");
319 	if ((borrow != parent) && (borrow != NULL)) {
320 		kprintf("cbq_add_class: borrow class != parent\n");
/* reject duplicate root/default classes and bad flag combinations */
327 	switch (opts->flags & CBQCLF_CLASSMASK) {
328 	case CBQCLF_ROOTCLASS:
331 		if (cbqp->ifnp.root_)
334 	case CBQCLF_DEFCLASS:
335 		if (cbqp->ifnp.default_)
343 		/* more than two flags bits set */
348 	 * create a class. if this is a root class, initialize the
351 	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
352 		rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
353 		cbqrestart, a->qlimit, RM_MAXQUEUED,
354 		opts->maxidle, opts->minidle, opts->offtime,
356 		cl = cbqp->ifnp.root_;
358 		cl = rmc_newclass(a->priority,
359 		&cbqp->ifnp, opts->ns_per_byte,
360 		rmc_delay_action, a->qlimit, parent, borrow,
361 		opts->maxidle, opts->minidle, opts->offtime,
362 		opts->pktsize, opts->flags);
367 	/* return handle to user space. */
368 	cl->stats_.handle = a->qid;
369 	cl->stats_.depth = cl->depth_;
371 	/* save the allocated class */
372 	cbqp->cbq_class_tbl[i] = cl;
374 	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
375 		cbqp->ifnp.default_ = cl;
/*
 * cbq_add_queue(): public wrapper -- look up the CBQ state from the
 * pf_altq record, take the CBQ lock (acquire/release lines are missing
 * from this extract), and delegate to cbq_add_queue_locked().
 */
381 cbq_add_queue(struct pf_altq *a)
390 	/* XXX not MP safe */
391 	if ((cbqp = a->altq_disc) == NULL)
393 	ifq = cbqp->ifnp.ifq_;
396 	error = cbq_add_queue_locked(a, cbqp);
/*
 * cbq_remove_queue_locked(): delete one class by qid.  Caller holds the
 * CBQ lock.  Refuses to delete a class that still has children
 * (is_a_parent_class), then removes it from the rm machinery and clears
 * its table slot and any cached root_/default_ pointer.
 */
403 cbq_remove_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
408 	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
411 	/* if we are a parent class, then return an error. */
412 	if (is_a_parent_class(cl))
415 	/* delete the class */
416 	rmc_delete_class(&cbqp->ifnp, cl);
419 	 * free the class handle
421 	for (i = 0; i < CBQ_MAX_CLASSES; i++)
422 		if (cbqp->cbq_class_tbl[i] == cl) {
423 			cbqp->cbq_class_tbl[i] = NULL;
424 			if (cl == cbqp->ifnp.root_)
425 				cbqp->ifnp.root_ = NULL;
426 			if (cl == cbqp->ifnp.default_)
427 				cbqp->ifnp.default_ = NULL;
/*
 * cbq_remove_queue(): public wrapper mirroring cbq_add_queue() -- fetch
 * the CBQ state, lock, and delegate to cbq_remove_queue_locked().
 * (Lock/unlock and return lines are missing from this extract.)
 */
435 cbq_remove_queue(struct pf_altq *a)
441 	/* XXX not MP safe */
442 	if ((cbqp = a->altq_disc) == NULL)
444 	ifq = cbqp->ifnp.ifq_;
447 	error = cbq_remove_queue_locked(a, cbqp);
/*
 * cbq_getqstats(): copy one class's statistics out to userland.
 * Verifies the caller's buffer is large enough, resolves the qid to a
 * class, snapshots the stats under the lock into a local struct, then
 * copyout()s it and reports the byte count written via *nbytes.
 */
454 cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
462 	if (*nbytes < sizeof(stats))
465 	/* XXX not MP safe */
466 	if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL)
468 	ifq = cbqp->ifnp.ifq_;
472 	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) {
477 	get_class_stats(&stats, cl);
/* copyout after dropping the lock; stats is a private snapshot */
481 	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
483 	*nbytes = sizeof(stats);
489 * cbq_enqueue(struct ifaltq_subqueue *ifq, struct mbuf *m,
490 * struct altq_pktattr *pattr)
491 * - Queue data packets.
493 * cbq_enqueue is set to ifp->if_altqenqueue and called by an upper
494 * layer (e.g. ether_output). cbq_enqueue queues the given packet
495 * to the cbq, then invokes the driver's start routine.
497 * Returns: 0 if the queueing is successful.
498 * ENOBUFS if a packet dropping occurred as a result of
/*
 * NOTE(review): lines are missing from this extract -- notably the
 * drop path for non-pkthdr mbufs, the else-branch assigning the
 * default class, and the qlen/return bookkeeping at the end.
 */
503 cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
504 struct altq_pktattr *pktattr __unused)
506 	struct ifaltq *ifq = ifsq->ifsq_altq;
507 	cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
511 	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
513 	 * Race happened, the unrelated subqueue was
514 	 * picked during the packet scheduler transition.
516 	ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
521 	/* grab class set by classifier */
522 	if ((m->m_flags & M_PKTHDR) == 0) {
523 		/* should not happen */
524 		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
/* pf(4) classifier tags the mbuf with the target qid; else use default */
528 	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
529 		cl = clh_to_clp(cbqp, m->m_pkthdr.pf.qid);
533 	cl = cbqp->ifnp.default_;
542 	if (rmc_queue_packet(cl, m) != 0) {
543 		/* drop occurred. some mbuf was freed in rmc_queue_packet. */
544 		PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
549 	/* successfully queued. */
/*
 * cbq_dequeue(): ALTQ dequeue hook.  Pulls the next packet chosen by
 * the rm_class scheduler; for ALTDQ_REMOVE it also decrements the CBQ
 * packet count and updates class utilization.  ALTDQ_POLL (peek) must
 * return the same mbuf a subsequent remove would -- hence the KKASSERT
 * comparing mpolled to the dequeued mbuf.
 */
557 cbq_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
559 	struct ifaltq *ifq = ifsq->ifsq_altq;
560 	cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
563 	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
565 	 * Race happened, the unrelated subqueue was
566 	 * picked during the packet scheduler transition.
568 	ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
573 	m = rmc_dequeue_next(&cbqp->ifnp, op);
575 	if (m && op == ALTDQ_REMOVE) {
576 		--cbqp->cbq_qlen; /* decrement # of packets in cbq */
579 		/* Update the class. */
580 		rmc_update_class_util(&cbqp->ifnp);
583 	KKASSERT(mpolled == NULL || mpolled == m);
589 * cbqrestart(queue_t *) - Restart sending of data.
590 * called from rmc_restart in a critical section via timeout after waking up
/*
 * cbqrestart(): kick the driver after a suspended class becomes
 * eligible again.  Bails out if ALTQ has been disabled/detached or the
 * discipline pointer is gone; otherwise, when packets are pending,
 * drops the ALTQ lock (to avoid a lock-order deadlock with the hardware
 * serializer), calls the driver's if_start, and re-locks.
 */
596 cbqrestart(struct ifaltq *ifq)
600 	CBQ_ASSERT_LOCKED(ifq);
602 	if (!ifq_is_enabled(ifq))
603 		/* cbq must have been detached */
606 	if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
607 		/* should not happen */
610 	if (cbqp->cbq_qlen > 0) {
611 		struct ifnet *ifp = ifq->altq_ifp;
612 		struct ifaltq_subque *ifsq = &ifq->altq_subq[CBQ_SUBQ_INDEX];
614 		/* Release the altq lock to avoid deadlock */
617 		ifsq_serialize_hw(ifsq);
618 		if (ifp->if_start && !ifsq_is_oactive(ifsq))
619 			(*ifp->if_start)(ifp, ifsq);
620 		ifsq_deserialize_hw(ifsq);
/*
 * cbq_purge(): drop every queued packet in every class (the per-class
 * rmc_dropall call is missing from this extract), then zero the
 * subqueue length counter if ALTQ is enabled on the interface.
 */
627 cbq_purge(cbq_state_t *cbqp)
631 	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
632 		if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
635 	if (ifq_is_enabled(cbqp->ifnp.ifq_))
636 		cbqp->ifnp.ifq_->altq_subq[CBQ_SUBQ_INDEX].ifq_len = 0;
639 #endif /* ALTQ_CBQ */