if: Multiple TX queue support step 3 of 3; map CPUID to subqueue
sys/net/altq/altq_cbq.c
/*	$KAME: altq_cbq.c,v 1.20 2004/04/17 10:54:48 kjc Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_cbq.c,v 1.7 2008/05/14 11:59:23 sephe Exp $ */

/*
 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the SMCC Technology
 *      Development Group at Sun Microsystems, Inc.
 *
 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE.  The software is
 * provided "as is" without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this software.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>

#include <sys/thread2.h>

#define CBQ_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define CBQ_LOCK(ifq) \
        ALTQ_SQ_LOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
#define CBQ_UNLOCK(ifq) \
        ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
#define CBQ_ASSERT_LOCKED(ifq) \
        ALTQ_SQ_ASSERT_LOCKED(&(ifq)->altq_subq[CBQ_SUBQ_INDEX])
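
/*
 * CBQ is a single-subqueue discipline: all of its state is pinned to
 * the default subqueue, and the macros above lock only that subqueue.
 * A minimal sketch of the expansion (assuming the altq_subq[] layout
 * declared in the altq headers):
 *
 *	CBQ_LOCK(ifq)
 *	    -> ALTQ_SQ_LOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT])
 *
 * Requests arriving on any other subqueue are handed back to the
 * classic subqueue code; see the ifsq_get_index() checks below.
 */
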
/*
 * Forward Declarations.
 */
static int	cbq_class_destroy(cbq_state_t *, struct rm_class *);
static struct rm_class *clh_to_clp(cbq_state_t *, uint32_t);
static int	cbq_clear_interface(cbq_state_t *);
static int	cbq_request(struct ifaltq_subque *, int, void *);
static int	cbq_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
static struct mbuf *cbq_dequeue(struct ifaltq_subque *, struct mbuf *,
		    int);
static void	cbqrestart(struct ifaltq *);
static void	get_class_stats(class_stats_t *, struct rm_class *);
static void	cbq_purge(cbq_state_t *);
/*
 * int
 * cbq_class_destroy(cbq_state_t *, struct rm_class *) - This
 *	function destroys a given traffic class.  Before destroying
 *	the class, all traffic for that class is released.
 */
static int
cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
{
        int i;

        /* delete the class */
        rmc_delete_class(&cbqp->ifnp, cl);

        /*
         * free the class handle
         */
        for (i = 0; i < CBQ_MAX_CLASSES; i++)
                if (cbqp->cbq_class_tbl[i] == cl)
                        cbqp->cbq_class_tbl[i] = NULL;

        if (cl == cbqp->ifnp.root_)
                cbqp->ifnp.root_ = NULL;
        if (cl == cbqp->ifnp.default_)
                cbqp->ifnp.default_ = NULL;
        return (0);
}
/* convert class handle to class pointer */
static struct rm_class *
clh_to_clp(cbq_state_t *cbqp, uint32_t chandle)
{
        int i;
        struct rm_class *cl;

        if (chandle == 0)
                return (NULL);
        /*
         * First, optimistically try the slot matching the low bits of
         * the handle; if that misses, fall back to a linear table search.
         */
        i = chandle % CBQ_MAX_CLASSES;
        if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
            cl->stats_.handle == chandle)
                return (cl);
        for (i = 0; i < CBQ_MAX_CLASSES; i++)
                if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
                    cl->stats_.handle == chandle)
                        return (cl);
        return (NULL);
}
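
/*
 * Illustration of the two-step lookup above (the handle value is
 * hypothetical): a class with handle 0x10003 normally sits in slot
 * 0x10003 % CBQ_MAX_CLASSES, because cbq_add_queue_locked() below uses
 * the same "preferred slot, else first free slot" rule on insertion,
 * so the fast path usually hits.  Only when the preferred slot is
 * empty or holds a different handle does the O(n) scan run.
 */
#if 0
        uint32_t chandle = 0x10003;                     /* hypothetical qid */
        int slot = chandle % CBQ_MAX_CLASSES;           /* preferred slot */
        struct rm_class *cl = cbqp->cbq_class_tbl[slot]; /* fast-path probe */
#endif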

static int
cbq_clear_interface(cbq_state_t *cbqp)
{
        int again, i;
        struct rm_class *cl;

        /* clear out the classes now */
        do {
                again = 0;
                for (i = 0; i < CBQ_MAX_CLASSES; i++) {
                        if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
                                if (is_a_parent_class(cl))
                                        again++;
                                else {
                                        cbq_class_destroy(cbqp, cl);
                                        cbqp->cbq_class_tbl[i] = NULL;
                                        if (cl == cbqp->ifnp.root_)
                                                cbqp->ifnp.root_ = NULL;
                                        if (cl == cbqp->ifnp.default_)
                                                cbqp->ifnp.default_ = NULL;
                                }
                        }
                }
        } while (again);

        return (0);
}
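
/*
 * Note on the loop above: a class is only destroyed once it has no
 * remaining children, so parent classes are skipped ("again++") until
 * their descendants have been removed by an earlier pass.  For a
 * hierarchy such as
 *
 *	root
 *	  `- agency
 *	        `- leaf
 *
 * pass 1 deletes leaf, pass 2 deletes agency, and pass 3 deletes root;
 * the do/while thus runs at most tree-depth iterations.
 */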

static int
cbq_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
        struct ifaltq *ifq = ifsq->ifsq_altq;
        cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;

        crit_enter();
        switch (req) {
        case ALTRQ_PURGE:
                if (ifsq_get_index(ifsq) == CBQ_SUBQ_INDEX) {
                        cbq_purge(cbqp);
                } else {
                        /*
                         * A race occurred: an unrelated subqueue was
                         * picked during the packet scheduler transition.
                         */
                        ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
                }
                break;
        }
        crit_exit();
        return (0);
}

/* copy the stats info in rm_class to class_stats_t */
static void
get_class_stats(class_stats_t *statsp, struct rm_class *cl)
{
        statsp->xmit_cnt = cl->stats_.xmit_cnt;
        statsp->drop_cnt = cl->stats_.drop_cnt;
        statsp->over = cl->stats_.over;
        statsp->borrows = cl->stats_.borrows;
        statsp->overactions = cl->stats_.overactions;
        statsp->delays = cl->stats_.delays;

        statsp->depth = cl->depth_;
        statsp->priority = cl->pri_;
        statsp->maxidle = cl->maxidle_;
        statsp->minidle = cl->minidle_;
        statsp->offtime = cl->offtime_;
        statsp->qmax = qlimit(cl->q_);
        statsp->ns_per_byte = cl->ns_per_byte_;
        statsp->wrr_allot = cl->w_allotment_;
        statsp->qcnt = qlen(cl->q_);
        statsp->avgidle = cl->avgidle_;

        statsp->qtype = qtype(cl->q_);
#ifdef ALTQ_RED
        if (q_is_red(cl->q_))
                red_getstats(cl->red_, &statsp->red[0]);
#endif
#ifdef ALTQ_RIO
        if (q_is_rio(cl->q_))
                rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
#endif
}

int
cbq_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
        return altq_attach(ifq, ALTQT_CBQ, a->altq_disc, ifq_mapsubq_default,
            cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
}
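
/*
 * The subqueue mapping handler installed here, ifq_mapsubq_default,
 * maps any CPUID to the default subqueue (this is the "map CPUID to
 * subqueue" hook added with multiple TX queue support).  Even on a
 * multi-TX-queue interface, every packet scheduled by CBQ is thus
 * funneled into the single subqueue this discipline services; see
 * CBQ_SUBQ_INDEX above.
 */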

int
cbq_add_altq(struct pf_altq *a)
{
        cbq_state_t *cbqp;
        struct ifnet *ifp;

        if ((ifp = ifunit(a->ifname)) == NULL)
                return (EINVAL);
        if (!ifq_is_ready(&ifp->if_snd))
                return (ENODEV);

        /* allocate and initialize cbq_state_t */
        cbqp = kmalloc(sizeof(*cbqp), M_ALTQ, M_WAITOK | M_ZERO);
        callout_init(&cbqp->cbq_callout);
        cbqp->cbq_qlen = 0;
        cbqp->ifnp.ifq_ = &ifp->if_snd;		/* keep the ifq */
        ifq_purge_all(&ifp->if_snd);

        /* keep the state in pf_altq */
        a->altq_disc = cbqp;

        return (0);
}

int
cbq_remove_altq(struct pf_altq *a)
{
        cbq_state_t *cbqp;

        if ((cbqp = a->altq_disc) == NULL)
                return (EINVAL);
        a->altq_disc = NULL;

        cbq_clear_interface(cbqp);

        if (cbqp->ifnp.default_)
                cbq_class_destroy(cbqp, cbqp->ifnp.default_);
        if (cbqp->ifnp.root_)
                cbq_class_destroy(cbqp, cbqp->ifnp.root_);

        /* deallocate cbq_state_t */
        kfree(cbqp, M_ALTQ);

        return (0);
}

static int
cbq_add_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
        struct rm_class *borrow, *parent;
        struct rm_class *cl;
        struct cbq_opts *opts;
        int i;

        KKASSERT(a->qid != 0);

        /*
         * find a free slot in the class table.  if the slot matching
         * the lower bits of qid is free, use this slot.  otherwise,
         * use the first free slot.
         */
        i = a->qid % CBQ_MAX_CLASSES;
        if (cbqp->cbq_class_tbl[i] != NULL) {
                for (i = 0; i < CBQ_MAX_CLASSES; i++)
                        if (cbqp->cbq_class_tbl[i] == NULL)
                                break;
                if (i == CBQ_MAX_CLASSES)
                        return (EINVAL);
        }

        opts = &a->pq_u.cbq_opts;
        /* check parameters */
        if (a->priority >= CBQ_MAXPRI)
                return (EINVAL);

        /* Get pointers to parent and borrow classes. */
        parent = clh_to_clp(cbqp, a->parent_qid);
        if (opts->flags & CBQCLF_BORROW)
                borrow = parent;
        else
                borrow = NULL;

        /*
         * A class must borrow from its parent or it cannot
         * borrow at all.  Hence, borrow can be null.
         */
        if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
                kprintf("cbq_add_queue: no parent class!\n");
                return (EINVAL);
        }

        if ((borrow != parent) && (borrow != NULL)) {
                kprintf("cbq_add_class: borrow class != parent\n");
                return (EINVAL);
        }

        /*
         * check parameters
         */
        switch (opts->flags & CBQCLF_CLASSMASK) {
        case CBQCLF_ROOTCLASS:
                if (parent != NULL)
                        return (EINVAL);
                if (cbqp->ifnp.root_)
                        return (EINVAL);
                break;
        case CBQCLF_DEFCLASS:
                if (cbqp->ifnp.default_)
                        return (EINVAL);
                break;
        case 0:
                if (a->qid == 0)
                        return (EINVAL);
                break;
        default:
                /* more than one class flag bit set */
                return (EINVAL);
        }

        /*
         * create a class.  if this is a root class, initialize the
         * interface.
         */
        if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
                rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
                    cbqrestart, a->qlimit, RM_MAXQUEUED,
                    opts->maxidle, opts->minidle, opts->offtime,
                    opts->flags);
                cl = cbqp->ifnp.root_;
        } else {
                cl = rmc_newclass(a->priority,
                    &cbqp->ifnp, opts->ns_per_byte,
                    rmc_delay_action, a->qlimit, parent, borrow,
                    opts->maxidle, opts->minidle, opts->offtime,
                    opts->pktsize, opts->flags);
        }
        if (cl == NULL)
                return (ENOMEM);

        /* return handle to user space. */
        cl->stats_.handle = a->qid;
        cl->stats_.depth = cl->depth_;

        /* save the allocated class */
        cbqp->cbq_class_tbl[i] = cl;

        if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
                cbqp->ifnp.default_ = cl;

        return (0);
}
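
/*
 * The parameters consumed above come from pf(4).  A minimal pf.conf
 * sketch (interface and queue names are illustrative) that exercises
 * the root/default/borrow paths of this function:
 *
 *	altq on em0 cbq bandwidth 10Mb queue { std, ssh }
 *	queue std bandwidth 70% cbq(default)
 *	queue ssh bandwidth 30% cbq(borrow)
 *
 * pfctl(8) turns each queue statement into a cbq_add_queue() call: the
 * implicit root class carries CBQCLF_ROOTCLASS, "std" carries
 * CBQCLF_DEFCLASS, and cbq(borrow) sets CBQCLF_BORROW so the class may
 * draw on its parent's unused bandwidth.
 */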

int
cbq_add_queue(struct pf_altq *a)
{
        cbq_state_t *cbqp;
        struct ifaltq *ifq;
        int error;

        if (a->qid == 0)
                return (EINVAL);

        /* XXX not MP safe */
        if ((cbqp = a->altq_disc) == NULL)
                return (EINVAL);
        ifq = cbqp->ifnp.ifq_;

        CBQ_LOCK(ifq);
        error = cbq_add_queue_locked(a, cbqp);
        CBQ_UNLOCK(ifq);

        return error;
}

static int
cbq_remove_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
        struct rm_class *cl;
        int i;

        if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
                return (EINVAL);

        /* if we are a parent class, then return an error. */
        if (is_a_parent_class(cl))
                return (EINVAL);

        /* delete the class */
        rmc_delete_class(&cbqp->ifnp, cl);

        /*
         * free the class handle
         */
        for (i = 0; i < CBQ_MAX_CLASSES; i++)
                if (cbqp->cbq_class_tbl[i] == cl) {
                        cbqp->cbq_class_tbl[i] = NULL;
                        if (cl == cbqp->ifnp.root_)
                                cbqp->ifnp.root_ = NULL;
                        if (cl == cbqp->ifnp.default_)
                                cbqp->ifnp.default_ = NULL;
                        break;
                }

        return (0);
}

int
cbq_remove_queue(struct pf_altq *a)
{
        cbq_state_t *cbqp;
        struct ifaltq *ifq;
        int error;

        /* XXX not MP safe */
        if ((cbqp = a->altq_disc) == NULL)
                return (EINVAL);
        ifq = cbqp->ifnp.ifq_;

        CBQ_LOCK(ifq);
        error = cbq_remove_queue_locked(a, cbqp);
        CBQ_UNLOCK(ifq);

        return error;
}

int
cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
        cbq_state_t *cbqp;
        struct rm_class *cl;
        class_stats_t stats;
        int error = 0;
        struct ifaltq *ifq;

        if (*nbytes < sizeof(stats))
                return (EINVAL);

        /* XXX not MP safe */
        if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL)
                return (EBADF);
        ifq = cbqp->ifnp.ifq_;

        CBQ_LOCK(ifq);

        if ((cl = clh_to_clp(cbqp, a->qid)) == NULL) {
                CBQ_UNLOCK(ifq);
                return (EINVAL);
        }

        get_class_stats(&stats, cl);

        CBQ_UNLOCK(ifq);

        if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
                return (error);
        *nbytes = sizeof(stats);
        return (0);
}
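
/*
 * A minimal userland sketch of how these stats are read back, assuming
 * the standard pf(4) ioctl interface (struct pfioc_qstats with
 * DIOCGETQSTATS); "pfctl -vs queue" performs the equivalent sequence.
 * The nr and ticket values would come from a prior DIOCGETALTQS walk;
 * pf_fd is a hypothetical descriptor open on /dev/pf.
 */
#if 0
        struct pfioc_qstats pq;
        class_stats_t stats;

        memset(&pq, 0, sizeof(pq));
        pq.nr = nr;                     /* queue index from DIOCGETALTQS */
        pq.ticket = ticket;             /* ticket from DIOCGETALTQS */
        pq.buf = &stats;
        pq.nbytes = sizeof(stats);
        if (ioctl(pf_fd, DIOCGETQSTATS, &pq) == -1)
                err(1, "DIOCGETQSTATS");
        printf("xmit: %llu packets\n",
            (unsigned long long)stats.xmit_cnt.packets);
#endif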

/*
 * int
 * cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
 *	struct altq_pktattr *pktattr)
 *	- Queue data packets.
 *
 *	cbq_enqueue is installed as the subqueue enqueue routine by
 *	altq_attach() (see cbq_pfattach() above) and is called from an
 *	upper layer (e.g. ether_output).  It queues the given packet on
 *	the cbq; the caller is then expected to kick the driver's start
 *	routine.
 *
 *	Returns:	0 if the queueing is successful.
 *			ENOBUFS if a packet drop occurred as a result of
 *			the queueing.
 */

static int
cbq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr __unused)
{
        struct ifaltq *ifq = ifsq->ifsq_altq;
        cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
        struct rm_class *cl;
        int len;

        if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
                /*
                 * A race occurred: an unrelated subqueue was
                 * picked during the packet scheduler transition.
                 */
                ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
                m_freem(m);
                return (ENOBUFS);
        }

        /* grab class set by classifier */
        if ((m->m_flags & M_PKTHDR) == 0) {
                /* should not happen */
                if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
                m_freem(m);
                return (ENOBUFS);
        }
        if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
                cl = clh_to_clp(cbqp, m->m_pkthdr.pf.qid);
        else
                cl = NULL;
        if (cl == NULL) {
                cl = cbqp->ifnp.default_;
                if (cl == NULL) {
                        m_freem(m);
                        return (ENOBUFS);
                }
        }
        crit_enter();
        cl->pktattr_ = NULL;
        len = m_pktlen(m);
        if (rmc_queue_packet(cl, m) != 0) {
                /* drop occurred.  some mbuf was freed in rmc_queue_packet. */
                PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
                crit_exit();
                return (ENOBUFS);
        }

        /* successfully queued. */
        ++cbqp->cbq_qlen;
        ++ifsq->ifq_len;
        crit_exit();
        return (0);
}
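
/*
 * The qid consumed above is stamped on the mbuf by pf at filter time.
 * For a rule such as (illustrative)
 *
 *	pass out on em0 proto tcp to port 22 queue ssh
 *
 * pf records the queue handle in m->m_pkthdr.pf.qid and sets
 * PF_MBUF_STRUCTURE in fw_flags; packets without a usable tag fall
 * through to the default class, and if no default class exists the
 * packet is dropped.
 */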

static struct mbuf *
cbq_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
{
        struct ifaltq *ifq = ifsq->ifsq_altq;
        cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
        struct mbuf *m;

        if (ifsq_get_index(ifsq) != CBQ_SUBQ_INDEX) {
                /*
                 * A race occurred: an unrelated subqueue was
                 * picked during the packet scheduler transition.
                 */
                ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
                return NULL;
        }

        crit_enter();
        m = rmc_dequeue_next(&cbqp->ifnp, op);

        if (m && op == ALTDQ_REMOVE) {
                --cbqp->cbq_qlen;	/* decrement # of packets in cbq */
                --ifsq->ifq_len;

                /* Update the class. */
                rmc_update_class_util(&cbqp->ifnp);
        }
        crit_exit();
        KKASSERT(mpolled == NULL || mpolled == m);
        return (m);
}
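
/*
 * Driver-side contract for the op argument above: ALTDQ_POLL must
 * return the same mbuf that a subsequent ALTDQ_REMOVE yields (the
 * KKASSERT enforces this), letting a driver peek at the next packet
 * before committing to it.  A minimal sketch, assuming poll/dequeue
 * wrappers along the lines of ifsq_poll()/ifsq_dequeue();
 * can_transmit() is a hypothetical driver predicate:
 */
#if 0
        m = ifsq_poll(ifsq);                    /* ALTDQ_POLL: peek only */
        if (m != NULL && can_transmit(sc, m))
                m = ifsq_dequeue(ifsq, m);      /* ALTDQ_REMOVE: commit */
#endif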

/*
 * void
 * cbqrestart(struct ifaltq *) - Restart sending of data.
 *	Called from rmc_restart, in a critical section, via timeout after
 *	waking up a suspended class.
 *	Returns:	NONE
 */

static void
cbqrestart(struct ifaltq *ifq)
{
        cbq_state_t *cbqp;

        CBQ_ASSERT_LOCKED(ifq);

        if (!ifq_is_enabled(ifq))
                /* cbq must have been detached */
                return;

        if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
                /* should not happen */
                return;

        if (cbqp->cbq_qlen > 0) {
                struct ifnet *ifp = ifq->altq_ifp;
                struct ifaltq_subque *ifsq = &ifq->altq_subq[CBQ_SUBQ_INDEX];

                /* Release the altq lock to avoid deadlock */
                CBQ_UNLOCK(ifq);

                ifnet_serialize_tx(ifp, ifsq);
                if (ifp->if_start && !ifsq_is_oactive(ifsq))
                        (*ifp->if_start)(ifp, ifsq);
                ifnet_deserialize_tx(ifp, ifsq);

                CBQ_LOCK(ifq);
        }
}
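
/*
 * Why cbqrestart() drops the subqueue lock above: the driver's
 * if_start routine runs under the TX serializer and may itself call
 * back into ALTQ (e.g. to dequeue), taking the subqueue lock.
 * Acquiring the serializer while still holding the subqueue lock
 * would invert that order and risk an AB/BA deadlock, so the lock is
 * released across the if_start call and retaken afterwards.
 */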

static void
cbq_purge(cbq_state_t *cbqp)
{
        struct rm_class *cl;
        int i;

        for (i = 0; i < CBQ_MAX_CLASSES; i++) {
                if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
                        rmc_dropall(cl);
        }
        if (ifq_is_enabled(cbqp->ifnp.ifq_))
                cbqp->ifnp.ifq_->altq_subq[CBQ_SUBQ_INDEX].ifq_len = 0;
}

#endif /* ALTQ_CBQ */