if: Multiple TX queue support step 3 of 3; map CPUID to subqueue
[dragonfly.git] / sys / net / altq / altq_priq.c
CommitLineData
4d723e5a 1/* $KAME: altq_priq.c,v 1.12 2004/04/17 10:54:48 kjc Exp $ */
9db4b353 2/* $DragonFly: src/sys/net/altq/altq_priq.c,v 1.9 2008/05/14 11:59:23 sephe Exp $ */
4d723e5a
JS
3
4/*
5 * Copyright (C) 2000-2003
6 * Sony Computer Science Laboratories Inc. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29/*
30 * priority queue
31 */
32
33#include "opt_altq.h"
34#include "opt_inet.h"
35#include "opt_inet6.h"
36
37#ifdef ALTQ_PRIQ /* priq is enabled by ALTQ_PRIQ option in opt_altq.h */
38
39#include <sys/param.h>
40#include <sys/malloc.h>
41#include <sys/mbuf.h>
42#include <sys/socket.h>
43#include <sys/sockio.h>
44#include <sys/systm.h>
45#include <sys/proc.h>
46#include <sys/errno.h>
47#include <sys/kernel.h>
48#include <sys/queue.h>
e9cb6d99 49#include <sys/thread.h>
4d723e5a
JS
50
51#include <net/if.h>
52#include <net/ifq_var.h>
53#include <netinet/in.h>
54
55#include <net/pf/pfvar.h>
56#include <net/altq/altq.h>
57#include <net/altq/altq_priq.h>
58
e9cb6d99
MD
59#include <sys/thread2.h>
60
f0a26983
SZ
/*
 * priq services only the default subqueue; its state is guarded by
 * that subqueue's lock via the macros below.
 */
#define PRIQ_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define PRIQ_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[PRIQ_SUBQ_INDEX])
#define PRIQ_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[PRIQ_SUBQ_INDEX])

/*
 * function prototypes
 */
static int priq_clear_interface(struct priq_if *);
static int priq_request(struct ifaltq_subque *, int, void *);
static void priq_purge(struct priq_if *);
static struct priq_class *priq_class_create(struct priq_if *, int, int, int, int);
static int priq_class_destroy(struct priq_class *);
static int priq_enqueue(struct ifaltq_subque *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *priq_dequeue(struct ifaltq_subque *, struct mbuf *, int);

static int priq_addq(struct priq_class *, struct mbuf *);
static struct mbuf *priq_getq(struct priq_class *);
static struct mbuf *priq_pollq(struct priq_class *);
static void priq_purgeq(struct priq_class *);

static void get_class_stats(struct priq_classstats *, struct priq_class *);
static struct priq_class *clh_to_clp(struct priq_if *, uint32_t);
87int
9db4b353 88priq_pfattach(struct pf_altq *a, struct ifaltq *ifq)
4d723e5a 89{
2cc2f639 90 return altq_attach(ifq, ALTQT_PRIQ, a->altq_disc, ifq_mapsubq_default,
4d723e5a 91 priq_enqueue, priq_dequeue, priq_request, NULL, NULL);
4d723e5a
JS
92}
93
94int
95priq_add_altq(struct pf_altq *a)
96{
97 struct priq_if *pif;
98 struct ifnet *ifp;
99
100 if ((ifp = ifunit(a->ifname)) == NULL)
101 return (EINVAL);
102 if (!ifq_is_ready(&ifp->if_snd))
103 return (ENODEV);
104
efda3bd0 105 pif = kmalloc(sizeof(*pif), M_ALTQ, M_WAITOK | M_ZERO);
4d723e5a
JS
106 pif->pif_bandwidth = a->ifbandwidth;
107 pif->pif_maxpri = -1;
108 pif->pif_ifq = &ifp->if_snd;
9275f515 109 ifq_purge_all(&ifp->if_snd);
4d723e5a
JS
110
111 /* keep the state in pf_altq */
112 a->altq_disc = pif;
113
114 return (0);
115}
116
117int
118priq_remove_altq(struct pf_altq *a)
119{
120 struct priq_if *pif;
121
122 if ((pif = a->altq_disc) == NULL)
123 return (EINVAL);
124 a->altq_disc = NULL;
125
126 priq_clear_interface(pif);
127
efda3bd0 128 kfree(pif, M_ALTQ);
4d723e5a
JS
129 return (0);
130}
131
9db4b353
SZ
132static int
133priq_add_queue_locked(struct pf_altq *a, struct priq_if *pif)
4d723e5a 134{
4d723e5a
JS
135 struct priq_class *cl;
136
9db4b353
SZ
137 KKASSERT(a->priority < PRIQ_MAXPRI);
138 KKASSERT(a->qid != 0);
4d723e5a 139
4d723e5a
JS
140 if (pif->pif_classes[a->priority] != NULL)
141 return (EBUSY);
142 if (clh_to_clp(pif, a->qid) != NULL)
143 return (EBUSY);
144
145 cl = priq_class_create(pif, a->priority, a->qlimit,
146 a->pq_u.priq_opts.flags, a->qid);
147 if (cl == NULL)
148 return (ENOMEM);
149
150 return (0);
151}
152
153int
9db4b353 154priq_add_queue(struct pf_altq *a)
4d723e5a
JS
155{
156 struct priq_if *pif;
9db4b353
SZ
157 struct ifaltq *ifq;
158 int error;
4d723e5a 159
9db4b353
SZ
160 /* check parameters */
161 if (a->priority >= PRIQ_MAXPRI)
162 return (EINVAL);
163 if (a->qid == 0)
164 return (EINVAL);
165
166 /* XXX not MP safe */
4d723e5a
JS
167 if ((pif = a->altq_disc) == NULL)
168 return (EINVAL);
9db4b353
SZ
169 ifq = pif->pif_ifq;
170
f0a26983 171 PRIQ_LOCK(ifq);
9db4b353 172 error = priq_add_queue_locked(a, pif);
f0a26983 173 PRIQ_UNLOCK(ifq);
9db4b353
SZ
174
175 return error;
176}
177
178static int
179priq_remove_queue_locked(struct pf_altq *a, struct priq_if *pif)
180{
181 struct priq_class *cl;
4d723e5a
JS
182
183 if ((cl = clh_to_clp(pif, a->qid)) == NULL)
184 return (EINVAL);
185
186 return (priq_class_destroy(cl));
187}
188
189int
9db4b353
SZ
190priq_remove_queue(struct pf_altq *a)
191{
192 struct priq_if *pif;
193 struct ifaltq *ifq;
194 int error;
195
196 /* XXX not MF safe */
197 if ((pif = a->altq_disc) == NULL)
198 return (EINVAL);
199 ifq = pif->pif_ifq;
200
f0a26983 201 PRIQ_LOCK(ifq);
9db4b353 202 error = priq_remove_queue_locked(a, pif);
f0a26983 203 PRIQ_UNLOCK(ifq);
9db4b353
SZ
204
205 return error;
206}
207
208int
4d723e5a
JS
209priq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
210{
211 struct priq_if *pif;
212 struct priq_class *cl;
213 struct priq_classstats stats;
9db4b353 214 struct ifaltq *ifq;
4d723e5a
JS
215 int error = 0;
216
9db4b353
SZ
217 if (*nbytes < sizeof(stats))
218 return (EINVAL);
219
220 /* XXX not MP safe */
4d723e5a
JS
221 if ((pif = altq_lookup(a->ifname, ALTQT_PRIQ)) == NULL)
222 return (EBADF);
9db4b353 223 ifq = pif->pif_ifq;
4d723e5a 224
f0a26983 225 PRIQ_LOCK(ifq);
4d723e5a 226
9db4b353 227 if ((cl = clh_to_clp(pif, a->qid)) == NULL) {
f0a26983 228 PRIQ_UNLOCK(ifq);
4d723e5a 229 return (EINVAL);
9db4b353 230 }
4d723e5a
JS
231
232 get_class_stats(&stats, cl);
233
f0a26983 234 PRIQ_UNLOCK(ifq);
9db4b353 235
4d723e5a
JS
236 if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
237 return (error);
238 *nbytes = sizeof(stats);
239 return (0);
240}
241
242/*
243 * bring the interface back to the initial state by discarding
244 * all the filters and classes.
245 */
246static int
247priq_clear_interface(struct priq_if *pif)
248{
249 struct priq_class *cl;
250 int pri;
251
252 /* clear out the classes */
253 for (pri = 0; pri <= pif->pif_maxpri; pri++) {
254 if ((cl = pif->pif_classes[pri]) != NULL)
255 priq_class_destroy(cl);
256 }
257
258 return (0);
259}
260
/*
 * ALTQ request handler; only ALTRQ_PURGE is implemented.  Runs inside
 * a critical section to stay atomic with enqueue/dequeue.
 */
static int
priq_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct priq_if *pif = (struct priq_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == PRIQ_SUBQ_INDEX) {
			priq_purge(pif);
		} else {
			/*
			 * Race happened, the unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}
284
285/* discard all the queued packets on the interface */
286static void
287priq_purge(struct priq_if *pif)
288{
289 struct priq_class *cl;
290 int pri;
291
292 for (pri = 0; pri <= pif->pif_maxpri; pri++) {
293 if ((cl = pif->pif_classes[pri]) != NULL && !qempty(cl->cl_q))
294 priq_purgeq(cl);
295 }
296 if (ifq_is_enabled(pif->pif_ifq))
f0a26983 297 pif->pif_ifq->altq_subq[PRIQ_SUBQ_INDEX].ifq_len = 0;
4d723e5a
JS
298}
299
/*
 * Create the class at priority `pri' with handle `qid'.  If a class
 * already exists at that priority it is drained, its RED/RIO state is
 * torn down, and the structure is reused in place.  Returns NULL on
 * failure (e.g. RED requested but ALTQ_RED not compiled in).
 */
static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, int qlimit, int flags, int qid)
{
	struct priq_class *cl;

#ifndef ALTQ_RED
	if (flags & PRCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("priq_class_create: RED not configured for PRIQ!\n");
#endif
		return (NULL);
	}
#endif

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		crit_enter();
		if (!qempty(cl->cl_q))
			priq_purgeq(cl);
		crit_exit();
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	} else {
		cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
		cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;	/* macro lvalue; `qlimit' param shadows it */
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

#ifdef ALTQ_RED
	if (flags & (PRCF_RED|PRCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & PRCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & PRCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		/*
		 * red_pkttime: nominal per-packet transmission time in
		 * nsec; falls back to 1 sec for tiny/unset bandwidths.
		 */
		if (pif->pif_bandwidth < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)pif->pif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (pif->pif_bandwidth / 8);
#ifdef ALTQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		} else
#endif
		if (flags & PRCF_RED) {
			/* RED thresholds at 10% / 30% of the queue limit */
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
	}
#endif /* ALTQ_RED */

	return (cl);
}
385
/*
 * Destroy a class: drain its queue, unlink it from the interface,
 * recompute pif_maxpri if the topmost class went away, release its
 * RED/RIO state, and free the memory.  Always returns 0.
 */
static int
priq_class_destroy(struct priq_class *cl)
{
	struct priq_if *pif;
	int pri;

	crit_enter();

	if (!qempty(cl->cl_q))
		priq_purgeq(cl);

	pif = cl->cl_pif;
	pif->pif_classes[cl->cl_pri] = NULL;
	if (pif->pif_maxpri == cl->cl_pri) {
		/* highest-priority class removed: find the new maximum */
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (pif->pif_classes[pri] != NULL) {
				pif->pif_maxpri = pri;
				break;
			}
		if (pri < 0)
			pif->pif_maxpri = -1;	/* no classes remain */
	}
	crit_exit();

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);
	return (0);
}
424
/*
 * priq_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 *
 * Classifies the mbuf to a priq class (the pf(4)-assigned qid, or the
 * default class) and appends it.  Returns 0 on success, ENOBUFS when
 * the packet was dropped; the mbuf is always consumed.
 */
static int
priq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
	struct priq_class *cl;
	int error;
	int len;

	if (ifsq_get_index(ifsq) != PRIQ_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return ENOBUFS;
	}

	crit_enter();

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		error = ENOBUFS;
		goto done;
	}

	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(pif, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL) {
		/* unknown qid: fall back to the default class */
		cl = pif->pif_default;
		if (cl == NULL) {
			m_freem(m);
			error = ENOBUFS;
			goto done;
		}
	}
	cl->cl_pktattr = NULL;
	/* record length before priq_addq() may free the mbuf */
	len = m_pktlen(m);
	if (priq_addq(cl, m) != 0) {
		/* drop occurred. mbuf was freed in priq_addq. */
		PKTCNTR_ADD(&cl->cl_dropcnt, len);
		error = ENOBUFS;
		goto done;
	}
	ifsq->ifq_len++;
	error = 0;
done:
	crit_exit();
	return (error);
}
486
/*
 * priq_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 *
 * Strict priority: scans classes from pif_maxpri downward and serves
 * the first non-empty one.
 */
static struct mbuf *
priq_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
	struct priq_class *cl;
	struct mbuf *m;
	int pri;

	if (ifsq_get_index(ifsq) != PRIQ_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	if (ifsq_is_empty(ifsq)) {
		/* no packet in the queue */
		KKASSERT(mpolled == NULL);
		return (NULL);
	}

	crit_enter();
	m = NULL;
	for (pri = pif->pif_maxpri; pri >= 0; pri--) {
		if ((cl = pif->pif_classes[pri]) != NULL && !qempty(cl->cl_q)) {
			if (op == ALTDQ_POLL) {
				/* peek only; leave the packet queued */
				m = priq_pollq(cl);
				break;
			}

			m = priq_getq(cl);
			if (m != NULL) {
				ifsq->ifq_len--;
				if (qempty(cl->cl_q))
					cl->cl_period++;
				PKTCNTR_ADD(&cl->cl_xmitcnt, m_pktlen(m));
			}
			break;
		}
	}
	crit_exit();
	/* enforce the POLL-then-REMOVE contract described above */
	KKASSERT(mpolled == NULL || mpolled == m);
	return (m);
}
543
/*
 * Append an mbuf to the class queue.  RIO/RED disciplines get first
 * claim (RIO checked before RED since a RIO queue also satisfies
 * q_is_red-style handling via its own addq); otherwise plain tail-drop
 * against the queue limit.  Returns 0 on success, -1 when the packet
 * was dropped (mbuf freed here).
 */
static int
priq_addq(struct priq_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q, m,
		    cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		/* tail drop */
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & PRCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}
568
/*
 * Remove and return the head packet of the class queue, dispatching
 * to the RIO/RED variant when the queue uses one.
 */
static struct mbuf *
priq_getq(struct priq_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}
582
/* peek at the head packet of the class queue without removing it */
static struct mbuf *
priq_pollq(struct priq_class *cl)
{
	return qhead(cl->cl_q);
}
588
589static void
590priq_purgeq(struct priq_class *cl)
591{
592 struct mbuf *m;
593
594 if (qempty(cl->cl_q))
595 return;
596
597 while ((m = _getq(cl->cl_q)) != NULL) {
598 PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
599 m_freem(m);
600 }
601 KKASSERT(qlen(cl->cl_q) == 0);
602}
603
604static void
605get_class_stats(struct priq_classstats *sp, struct priq_class *cl)
606{
607 sp->class_handle = cl->cl_handle;
608 sp->qlength = qlen(cl->cl_q);
609 sp->qlimit = qlimit(cl->cl_q);
610 sp->period = cl->cl_period;
611 sp->xmitcnt = cl->cl_xmitcnt;
612 sp->dropcnt = cl->cl_dropcnt;
613
614 sp->qtype = qtype(cl->cl_q);
615#ifdef ALTQ_RED
616 if (q_is_red(cl->cl_q))
617 red_getstats(cl->cl_red, &sp->red[0]);
618#endif
619#ifdef ALTQ_RIO
620 if (q_is_rio(cl->cl_q))
621 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
622#endif
623}
624
625/* convert a class handle to the corresponding class pointer */
626static struct priq_class *
627clh_to_clp(struct priq_if *pif, uint32_t chandle)
628{
629 struct priq_class *cl;
630 int idx;
631
632 if (chandle == 0)
633 return (NULL);
634
635 for (idx = pif->pif_maxpri; idx >= 0; idx--)
636 if ((cl = pif->pif_classes[idx]) != NULL &&
637 cl->cl_handle == chandle)
638 return (cl);
639
640 return (NULL);
641}
642
643#endif /* ALTQ_PRIQ */