1 /* $KAME: altq_cbq.c,v 1.20 2004/04/17 10:54:48 kjc Exp $ */
2 /* $DragonFly: src/sys/net/altq/altq_cbq.c,v 1.7 2008/05/14 11:59:23 sephe Exp $ */
5 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the SMCC Technology
21 * Development Group at Sun Microsystems, Inc.
23 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
24 * promote products derived from this software without specific prior
27 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
28 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE. The software is
29 * provided "as is" without express or implied warranty of any kind.
31 * These notices must be retained in any copies of any part of this software.
36 #include "opt_inet6.h"
38 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
40 #include <sys/param.h>
41 #include <sys/malloc.h>
43 #include <sys/socket.h>
44 #include <sys/systm.h>
46 #include <sys/callout.h>
47 #include <sys/errno.h>
49 #include <sys/thread.h>
52 #include <net/ifq_var.h>
53 #include <netinet/in.h>
55 #include <net/pf/pfvar.h>
56 #include <net/altq/altq.h>
57 #include <net/altq/altq_cbq.h>
59 #include <sys/thread2.h>
/*
 * CBQ drives exactly one ifq subqueue (the default one); these wrappers
 * lock, unlock, and assert ownership of that subqueue's ALTQ lock.
 */
61 #define CBQ_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
62 #define CBQ_LOCK(ifq) \
63     ALTQ_SQ_LOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
64 #define CBQ_UNLOCK(ifq) \
65     ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
66 #define CBQ_ASSERT_LOCKED(ifq) \
67     ALTQ_SQ_ASSERT_LOCKED(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
70 * Forward Declarations.
72 static int cbq_class_destroy(cbq_state_t *, struct rm_class *);
73 static struct rm_class *clh_to_clp(cbq_state_t *, uint32_t);
74 static int cbq_clear_interface(cbq_state_t *);
75 static int cbq_request(struct ifaltq_subque *, int, void *);
76 static int cbq_enqueue(struct ifaltq_subque *, struct mbuf *,
77 struct altq_pktattr *);
78 static struct mbuf *cbq_dequeue(struct ifaltq_subque *, int);
79 static void cbqrestart(struct ifaltq *);
80 static void get_class_stats(class_stats_t *, struct rm_class *);
81 static void cbq_purge(cbq_state_t *);
85  * cbq_class_destroy(cbq_mod_state_t *, struct rm_class *) - This
86  * function destroys a given traffic class.  Before destroying
87  * the class, all traffic for that class is released.
/*
 * Deletes 'cl' from the rm_class machinery, then clears every slot of
 * cbq_class_tbl[] that still points at it, and finally drops the cached
 * root_/default_ pointers if they referenced the destroyed class.
 * NOTE(review): this extract elides several lines of the function
 * (opening brace, local declarations, return value) — do not assume
 * anything about the return semantics from what is visible here.
 */
90 cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
94 	/* delete the class */
95 	rmc_delete_class(&cbqp->ifnp, cl);
98 	 * free the class handle
/* Clear every table slot referencing cl (handles may alias slots). */
100 	for (i = 0; i < CBQ_MAX_CLASSES; i++)
101 		if (cbqp->cbq_class_tbl[i] == cl)
102 			cbqp->cbq_class_tbl[i] = NULL;
/* Invalidate cached root/default shortcuts if they pointed at cl. */
104 	if (cl == cbqp->ifnp.root_)
105 		cbqp->ifnp.root_ = NULL;
106 	if (cl == cbqp->ifnp.default_)
107 		cbqp->ifnp.default_ = NULL;
111 /* convert class handle to class pointer */
/*
 * Look up the rm_class whose stats_.handle equals 'chandle'.
 * NOTE(review): the return statements are elided in this extract; from
 * the visible code the lookup is: fast-path probe of the slot indexed by
 * the handle's low bits, then a full linear scan on miss.
 */
112 static struct rm_class *
113 clh_to_clp(cbq_state_t *cbqp, uint32_t chandle)
121 	 * first, try optimistically the slot matching the lower bits of
122 	 * the handle.  if it fails, do the linear table search.
124 	i = chandle % CBQ_MAX_CLASSES;
125 	if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
126 	    cl->stats_.handle == chandle)
/* Fast path missed — fall back to scanning the whole table. */
128 	for (i = 0; i < CBQ_MAX_CLASSES; i++)
129 		if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
130 		    cl->stats_.handle == chandle)
/*
 * Destroy every class attached to this CBQ instance and reset the
 * root_/default_ shortcuts.
 * NOTE(review): loop-control lines are elided in this extract — the
 * visible is_a_parent_class() test presumably skips parents until their
 * children are gone (classic CBQ teardown order); confirm against the
 * full source.
 */
136 cbq_clear_interface(cbq_state_t *cbqp)
141 	/* clear out the classes now */
144 		for (i = 0; i < CBQ_MAX_CLASSES; i++) {
145 			if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
146 				if (is_a_parent_class(cl))
149 					cbq_class_destroy(cbqp, cl);
150 					cbqp->cbq_class_tbl[i] = NULL;
151 					if (cl == cbqp->ifnp.root_)
152 						cbqp->ifnp.root_ = NULL;
153 					if (cl == cbqp->ifnp.default_)
154 						cbqp->ifnp.default_ = NULL;
/*
 * ALTQ request handler (installed via altq_attach).  From the visible
 * code it services purge requests: purges this CBQ when the request is
 * for CBQ's own subqueue, otherwise delegates to the classic handler.
 * NOTE(review): the switch on 'req' and the purge call for the matching
 * branch are elided in this extract.
 */
164 cbq_request(struct ifaltq_subque *ifsq, int req, void *arg)
166 	struct ifaltq *ifq = ifsq->ifsq_altq;
167 	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
172 		if (ifsq_get_index(ifsq) == CBQ_SUBQ_INDEX) {
176 			 * Race happened, the unrelated subqueue was
177 			 * picked during the packet scheduler transition.
178 			 */
179 			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
187 /* copy the stats info in rm_class to class_states_t */
/*
 * Flatten the live rm_class counters and configuration into the
 * caller-supplied class_stats_t snapshot (consumed by cbq_getqstats for
 * the copyout to userland).  Pure field-by-field copy; no side effects
 * on 'cl'.
 */
189 get_class_stats(class_stats_t *statsp, struct rm_class *cl)
/* Packet/byte counters. */
191 	statsp->xmit_cnt	= cl->stats_.xmit_cnt;
192 	statsp->drop_cnt	= cl->stats_.drop_cnt;
193 	statsp->over		= cl->stats_.over;
194 	statsp->borrows		= cl->stats_.borrows;
195 	statsp->overactions	= cl->stats_.overactions;
196 	statsp->delays		= cl->stats_.delays;
/* Static configuration and current queue state. */
198 	statsp->depth		= cl->depth_;
199 	statsp->priority	= cl->pri_;
200 	statsp->maxidle		= cl->maxidle_;
201 	statsp->minidle		= cl->minidle_;
202 	statsp->offtime		= cl->offtime_;
203 	statsp->qmax		= qlimit(cl->q_);
204 	statsp->ns_per_byte	= cl->ns_per_byte_;
205 	statsp->wrr_allot	= cl->w_allotment_;
206 	statsp->qcnt		= qlen(cl->q_);
207 	statsp->avgidle		= cl->avgidle_;
209 	statsp->qtype		= qtype(cl->q_);
/* RED and RIO share the red_ pointer; RIO stats cast it accordingly. */
211 	if (q_is_red(cl->q_))
212 		red_getstats(cl->red_, &statsp->red[0]);
215 	if (q_is_rio(cl->q_))
216 		rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
/*
 * Attach this CBQ discipline (already set up in a->altq_disc) to the
 * interface queue, wiring in CBQ's enqueue/dequeue/request handlers and
 * the default subqueue mapper.
 */
221 cbq_pfattach(struct pf_altq *a, struct ifaltq *ifq)
223 	return altq_attach(ifq, ALTQT_CBQ, a->altq_disc, ifq_mapsubq_default,
224 	    cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
/*
 * pf(4) hook: allocate and initialize a cbq_state_t for the interface
 * named in a->ifname.  Purges the interface queues before handing the
 * state back in pf_altq.
 * NOTE(review): error-return lines and the final store into the pf_altq
 * are elided in this extract.
 */
228 cbq_add_altq(struct pf_altq *a)
233 	if ((ifp = ifunit(a->ifname)) == NULL)
235 	if (!ifq_is_ready(&ifp->if_snd))
238 	/* allocate and initialize cbq_state_t */
/* M_WAITOK: may sleep — must not be called from a non-sleepable context. */
239 	cbqp = kmalloc(sizeof(*cbqp), M_ALTQ, M_WAITOK | M_ZERO);
240 	callout_init(&cbqp->cbq_callout);
242 	cbqp->ifnp.ifq_ = &ifp->if_snd;	    /* keep the ifq */
243 	ifq_purge_all(&ifp->if_snd);
245 	/* keep the state in pf_altq */
/*
 * pf(4) hook: tear down the CBQ state attached via cbq_add_altq.
 * Destroys all leaf classes first (cbq_clear_interface), then the
 * default and root classes, in that order.
 * NOTE(review): the kfree of cbqp and the return are elided in this
 * extract.
 */
252 cbq_remove_altq(struct pf_altq *a)
256 	if ((cbqp = a->altq_disc) == NULL)
260 	cbq_clear_interface(cbqp);
/* default_ must go before root_: root is the borrow ancestor of all. */
262 	if (cbqp->ifnp.default_)
263 		cbq_class_destroy(cbqp, cbqp->ifnp.default_);
264 	if (cbqp->ifnp.root_)
265 		cbq_class_destroy(cbqp, cbqp->ifnp.root_);
267 	/* deallocate cbq_state_t */
/*
 * Create one CBQ class from the pf_altq description 'a' while the ifq
 * lock is held (caller: cbq_add_queue).  Validates priority, the
 * parent/borrow relationship and the root/default class flags, then
 * either initializes the root class (rmc_init) or creates a child class
 * (rmc_newclass), records it in cbq_class_tbl[], and tags it with the
 * user-visible qid handle.
 * NOTE(review): error-return statements and several branch bodies are
 * elided in this extract; the visible flow only shows the success path
 * and the checks themselves.
 */
274 cbq_add_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
276 	struct rm_class	*borrow, *parent;
278 	struct cbq_opts	*opts;
281 	KKASSERT(a->qid != 0);
284 	 * find a free slot in the class table.  if the slot matching
285 	 * the lower bits of qid is free, use this slot.  otherwise,
286 	 * use the first free slot.
288 	i = a->qid % CBQ_MAX_CLASSES;
289 	if (cbqp->cbq_class_tbl[i] != NULL) {
290 		for (i = 0; i < CBQ_MAX_CLASSES; i++)
291 			if (cbqp->cbq_class_tbl[i] == NULL)
/* Table full — no slot found. */
293 		if (i == CBQ_MAX_CLASSES)
297 	opts = &a->pq_u.cbq_opts;
298 	/* check parameters */
299 	if (a->priority >= CBQ_MAXPRI)
302 	/* Get pointers to parent and borrow classes. */
303 	parent = clh_to_clp(cbqp, a->parent_qid);
/* Borrowing is only enabled when CBQCLF_BORROW was requested. */
304 	if (opts->flags & CBQCLF_BORROW)
310 	 * A class must borrow from it's parent or it can not
311 	 * borrow at all.  Hence, borrow can be null.
313 	if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
314 		kprintf("cbq_add_queue: no parent class!\n");
318 	if ((borrow != parent)  && (borrow != NULL)) {
319 		kprintf("cbq_add_class: borrow class != parent\n");
/* Root and default classes are singletons; reject duplicates. */
326 	switch (opts->flags & CBQCLF_CLASSMASK) {
327 	case CBQCLF_ROOTCLASS:
330 		if (cbqp->ifnp.root_)
333 	case CBQCLF_DEFCLASS:
334 		if (cbqp->ifnp.default_)
342 		/* more than two flags bits set */
347 	 * create a class.  if this is a root class, initialize the
350 	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
351 		rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
352 		    cbqrestart, a->qlimit, RM_MAXQUEUED,
353 		    opts->maxidle, opts->minidle, opts->offtime,
/* rmc_init stores the root class in ifnp.root_. */
355 		cl = cbqp->ifnp.root_;
357 		cl = rmc_newclass(a->priority,
358 			  &cbqp->ifnp, opts->ns_per_byte,
359 			  rmc_delay_action, a->qlimit, parent, borrow,
360 			  opts->maxidle, opts->minidle, opts->offtime,
361 			  opts->pktsize, opts->flags);
366 	/* return handle to user space. */
367 	cl->stats_.handle = a->qid;
368 	cl->stats_.depth = cl->depth_;
370 	/* save the allocated class */
371 	cbqp->cbq_class_tbl[i] = cl;
373 	if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
374 		cbqp->ifnp.default_ = cl;
/*
 * pf(4) hook: locked wrapper around cbq_add_queue_locked.
 * NOTE(review): the CBQ_LOCK/CBQ_UNLOCK pair around the call and the
 * return are elided in this extract.
 */
380 cbq_add_queue(struct pf_altq *a)
389 	/* XXX not MP safe */
390 	if ((cbqp = a->altq_disc) == NULL)
392 	ifq = cbqp->ifnp.ifq_;
395 	error = cbq_add_queue_locked(a, cbqp);
/*
 * Destroy the class identified by a->qid while the ifq lock is held
 * (caller: cbq_remove_queue).  Refuses to remove a class that still has
 * children.  Mirrors cbq_class_destroy's cleanup inline: rm deletion,
 * table-slot clearing, root_/default_ invalidation.
 * NOTE(review): error codes, the loop break, and the return are elided
 * in this extract.
 */
402 cbq_remove_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
407 	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
410 	/* if we are a parent class, then return an error. */
411 	if (is_a_parent_class(cl))
414 	/* delete the class */
415 	rmc_delete_class(&cbqp->ifnp, cl);
418 	 * free the class handle
420 	for (i = 0; i < CBQ_MAX_CLASSES; i++)
421 		if (cbqp->cbq_class_tbl[i] == cl) {
422 			cbqp->cbq_class_tbl[i] = NULL;
423 			if (cl == cbqp->ifnp.root_)
424 				cbqp->ifnp.root_ = NULL;
425 			if (cl == cbqp->ifnp.default_)
426 				cbqp->ifnp.default_ = NULL;
/*
 * pf(4) hook: locked wrapper around cbq_remove_queue_locked.
 * NOTE(review): the CBQ_LOCK/CBQ_UNLOCK pair and the return are elided
 * in this extract.
 */
434 cbq_remove_queue(struct pf_altq *a)
440 	/* XXX not MP safe */
441 	if ((cbqp = a->altq_disc) == NULL)
443 	ifq = cbqp->ifnp.ifq_;
446 	error = cbq_remove_queue_locked(a, cbqp);
/*
 * pf(4) hook: snapshot the stats of class a->qid into a local
 * class_stats_t and copy it out to the userland buffer 'ubuf',
 * reporting the copied size in *nbytes.
 * NOTE(review): error returns and the lock/unlock bracketing around the
 * lookup are elided in this extract.
 */
453 cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
/* Caller's buffer must hold a full class_stats_t. */
461 	if (*nbytes < sizeof(stats))
464 	/* XXX not MP safe */
465 	if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL)
467 	ifq = cbqp->ifnp.ifq_;
471 	if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) {
/* Snapshot under the lock, copyout after (copyout may fault/sleep). */
476 	get_class_stats(&stats, cl);
480 	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
482 	*nbytes = sizeof(stats);
488  * cbq_enqueue(struct ifaltq_subqueue *ifq, struct mbuf *m,
489  *             struct altq_pktattr *pattr)
490  *	- Queue data packets.
492  *	cbq_enqueue is set to ifp->if_altqenqueue and called by an upper
493  *	layer (e.g. ether_output).  cbq_enqueue queues the given packet
494  *	to the cbq, then invokes the driver's start routine.
496  *	Returns:	0 if the queueing is successful.
497  *			ENOBUFS if a packet dropping occurred as a result of
/*
 * NOTE(review): several lines are elided in this extract — the fallback
 * branch when the mbuf carries no pf tag, the drop path when no class is
 * found, the qlen increment, and the returns are not visible.
 */
502 cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
503     struct altq_pktattr *pktattr __unused)
505 	struct ifaltq *ifq = ifsq->ifsq_altq;
506 	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
510 	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
512 		 * Race happened, the unrelated subqueue was
513 		 * picked during the packet scheduler transition.
515 		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
520 	/* grab class set by classifier */
/* pf(4) tags classified packets with a qid; untagged go to default_. */
522 	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
523 		cl = clh_to_clp(cbqp, m->m_pkthdr.pf.qid);
527 		cl = cbqp->ifnp.default_;
536 	if (rmc_queue_packet(cl, m) != 0) {
537 		/* drop occurred.  some mbuf was freed in rmc_queue_packet. */
538 		PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
543 	/* successfully queued. */
545 	ALTQ_SQ_PKTCNT_INC(ifsq);
/*
 * ALTQ dequeue handler: hand the next mbuf chosen by the rm scheduler
 * to the driver.  'op' distinguishes ALTDQ_REMOVE (take the packet)
 * from a peek; accounting is only updated on actual removal.
 * NOTE(review): the return of 'm' and the mbuf declaration are elided
 * in this extract.
 */
551 cbq_dequeue(struct ifaltq_subque *ifsq, int op)
553 	struct ifaltq *ifq = ifsq->ifsq_altq;
554 	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
557 	if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
559 		 * Race happened, the unrelated subqueue was
560 		 * picked during the packet scheduler transition.
562 		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
567 	m = rmc_dequeue_next(&cbqp->ifnp, op);
569 	if (m && op == ALTDQ_REMOVE) {
570 		--cbqp->cbq_qlen;  /* decrement # of packets in cbq */
571 		ALTQ_SQ_PKTCNT_DEC(ifsq);
573 		/* Update the class. */
574 		rmc_update_class_util(&cbqp->ifnp);
582  * cbqrestart(queue_t *) - Restart sending of data.
583  * called from rmc_restart in a critical section via timeout after waking up
/*
 * Kick the driver's if_start so queued CBQ traffic drains after a class
 * was throttled.  Requires the CBQ subqueue lock on entry.
 * NOTE(review): the early 'return's on the guard checks, and the
 * CBQ_UNLOCK/CBQ_LOCK pair around the hw-serialized start call hinted
 * at by the "Release the altq lock" comment, are elided in this
 * extract.
 */
589 cbqrestart(struct ifaltq *ifq)
593 	CBQ_ASSERT_LOCKED(ifq);
595 	if (!ifq_is_enabled(ifq))
596 		/* cbq must have been detached */
599 	if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
600 		/* should not happen */
/* Only poke the driver when CBQ actually holds packets. */
603 	if (cbqp->cbq_qlen > 0) {
604 		struct ifnet *ifp = ifq->altq_ifp;
605 		struct ifaltq_subque *ifsq = &ifq->altq_subq[CBQ_SUBQ_INDEX];
607 		/* Release the altq lock to avoid deadlock */
610 		ifsq_serialize_hw(ifsq);
611 		if (ifp->if_start && !ifsq_is_oactive(ifsq))
612 			(*ifp->if_start)(ifp, ifsq);
613 		ifsq_deserialize_hw(ifsq);
/*
 * Drop every queued packet in every class (rmc_dropall on each table
 * entry — the call itself is elided in this extract) and, when the ifq
 * is still enabled, reset the subqueue's packet/byte counters.
 */
620 cbq_purge(cbq_state_t *cbqp)
624 	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
625 		if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
628 	if (ifq_is_enabled(cbqp->ifnp.ifq_))
629 		ALTQ_SQ_CNTR_RESET(&cbqp->ifnp.ifq_->altq_subq[CBQ_SUBQ_INDEX]);
632 #endif /* ALTQ_CBQ */