2 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifndef _NET_IFQ_VAR_H_
33 #define _NET_IFQ_VAR_H_
37 #error "This file should not be included by userland programs."
42 #include <sys/systm.h>
44 #ifndef _SYS_THREAD2_H_
45 #include <sys/thread2.h>
47 #ifndef _SYS_SERIALIZE_H_
48 #include <sys/serialize.h>
53 #ifndef _NET_IF_VAR_H_
54 #include <net/if_var.h>
56 #ifndef _NET_ALTQ_IF_ALTQ_H_
57 #include <net/altq/if_altq.h>
/*
 * Assert that the given subqueue is the default subqueue of the
 * given interface's send queue.
 */
#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"));
68 * Support for "classic" ALTQ interfaces.
70 int ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
71 struct altq_pktattr *);
72 struct mbuf *ifsq_classic_dequeue(struct ifaltq_subque *, struct mbuf *,
74 int ifsq_classic_request(struct ifaltq_subque *, int, void *);
75 void ifq_set_classic(struct ifaltq *);
77 void ifq_set_maxlen(struct ifaltq *, int);
78 void ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
79 ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
80 int ifq_mapsubq_default(struct ifaltq *, int);
82 void ifsq_devstart(struct ifaltq_subque *ifsq);
83 void ifsq_devstart_sched(struct ifaltq_subque *ifsq);
86 * Dispatch a packet to an interface.
88 int ifq_dispatch(struct ifnet *, struct mbuf *,
89 struct altq_pktattr *);
94 ifq_is_enabled(struct ifaltq *_ifq)
96 return(_ifq->altq_flags & ALTQF_ENABLED);
100 ifq_is_attached(struct ifaltq *_ifq)
102 return(_ifq->altq_disc != NULL);
108 ifq_is_enabled(struct ifaltq *_ifq)
114 ifq_is_attached(struct ifaltq *_ifq)
122 ifq_is_ready(struct ifaltq *_ifq)
124 return(_ifq->altq_flags & ALTQF_READY);
128 ifq_set_ready(struct ifaltq *_ifq)
130 _ifq->altq_flags |= ALTQF_READY;
134 * ALTQ lock must be held
137 ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
138 struct altq_pktattr *_pa)
141 if (!ifq_is_enabled(_ifsq->ifsq_altq))
142 return ifsq_classic_enqueue(_ifsq, _m, _pa);
145 return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
/*
 * Locked wrapper around ifsq_enqueue_locked(); acquires and releases
 * the subqueue's ALTQ lock around the enqueue operation.
 */
static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}
160 static __inline struct mbuf *
161 ifsq_dequeue(struct ifaltq_subque *_ifsq, struct mbuf *_mpolled)
166 if (_ifsq->ifsq_prepended != NULL) {
167 _m = _ifsq->ifsq_prepended;
168 _ifsq->ifsq_prepended = NULL;
169 KKASSERT(_ifsq->ifq_len > 0);
171 ALTQ_SQ_UNLOCK(_ifsq);
176 if (_ifsq->ifsq_altq->altq_tbr != NULL)
177 _m = tbr_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
178 else if (!ifq_is_enabled(_ifsq->ifsq_altq))
179 _m = ifsq_classic_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
182 _m = _ifsq->ifsq_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
183 ALTQ_SQ_UNLOCK(_ifsq);
188 * ALTQ lock must be held
190 static __inline struct mbuf *
191 ifsq_poll_locked(struct ifaltq_subque *_ifsq)
193 if (_ifsq->ifsq_prepended != NULL)
194 return _ifsq->ifsq_prepended;
197 if (_ifsq->ifsq_altq->altq_tbr != NULL)
198 return tbr_dequeue(_ifsq, NULL, ALTDQ_POLL);
199 else if (!ifq_is_enabled(_ifsq->ifsq_altq))
200 return ifsq_classic_dequeue(_ifsq, NULL, ALTDQ_POLL);
203 return _ifsq->ifsq_dequeue(_ifsq, NULL, ALTDQ_POLL);
/*
 * Locked wrapper around ifsq_poll_locked().
 */
static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}
218 * ALTQ lock must be held
221 ifsq_purge_locked(struct ifaltq_subque *_ifsq)
223 if (_ifsq->ifsq_prepended != NULL) {
224 m_freem(_ifsq->ifsq_prepended);
225 _ifsq->ifsq_prepended = NULL;
226 KKASSERT(_ifsq->ifq_len > 0);
231 if (!ifq_is_enabled(_ifsq->ifsq_altq))
232 ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
235 _ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
/*
 * Locked wrapper around ifsq_purge_locked().
 */
static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}
247 ifq_lock_all(struct ifaltq *_ifq)
251 for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
252 ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
256 ifq_unlock_all(struct ifaltq *_ifq)
260 for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
261 ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
265 * ALTQ lock must be held
268 ifq_purge_all_locked(struct ifaltq *_ifq)
272 for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
273 ifsq_purge_locked(&_ifq->altq_subq[_q]);
277 ifq_purge_all(struct ifaltq *_ifq)
280 ifq_purge_all_locked(_ifq);
281 ifq_unlock_all(_ifq);
285 ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
286 struct altq_pktattr *_pa)
289 if (ifq_is_enabled(_ifq)) {
291 _pa->pattr_hdr = mtod(_m, caddr_t);
292 if (ifq_is_enabled(_ifq) &&
293 (_ifq->altq_flags & ALTQF_CLASSIFY)) {
294 /* XXX default subqueue */
295 struct ifaltq_subque *_ifsq =
296 &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
299 if (ifq_is_enabled(_ifq) &&
300 (_ifq->altq_flags & ALTQF_CLASSIFY))
301 _ifq->altq_classify(_ifq, _m, _pa);
302 ALTQ_SQ_UNLOCK(_ifsq);
309 ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
312 KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
313 _ifsq->ifsq_prepended = _m;
315 ALTQ_SQ_UNLOCK(_ifsq);
319 * Interface TX serializer must be held
322 ifsq_set_oactive(struct ifaltq_subque *_ifsq)
324 _ifsq->ifsq_hw_oactive = 1;
328 * Interface TX serializer must be held
331 ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
333 _ifsq->ifsq_hw_oactive = 0;
337 * Interface TX serializer must be held
340 ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
342 return _ifsq->ifsq_hw_oactive;
346 * Hand a packet to an interface.
348 * Interface TX serializer must be held. If the interface TX
349 * serializer is not held yet, ifq_dispatch() should be used
350 * to get better performance.
353 ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
355 struct ifaltq_subque *_ifsq;
357 int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */
359 _ifsq = &_ifp->if_snd.altq_subq[_qid];
361 ASSERT_IFNET_SERIALIZED_TX(_ifp, _ifsq);
362 _error = ifsq_enqueue(_ifsq, _m, _pa);
364 _ifp->if_obytes += _m->m_pkthdr.len;
365 if (_m->m_flags & M_MCAST)
367 if (!ifsq_is_oactive(_ifsq))
368 (*_ifp->if_start)(_ifp, _ifsq);
374 ifsq_is_empty(const struct ifaltq_subque *_ifsq)
376 return(_ifsq->ifq_len == 0);
380 * ALTQ lock must be held
383 ifsq_data_ready(struct ifaltq_subque *_ifsq)
386 if (_ifsq->ifsq_altq->altq_tbr != NULL)
387 return (ifsq_poll_locked(_ifsq) != NULL);
390 return !ifsq_is_empty(_ifsq);
394 * ALTQ lock must be held
397 ifsq_is_started(const struct ifaltq_subque *_ifsq)
399 return _ifsq->ifsq_started;
403 * ALTQ lock must be held
406 ifsq_set_started(struct ifaltq_subque *_ifsq)
408 _ifsq->ifsq_started = 1;
412 * ALTQ lock must be held
415 ifsq_clr_started(struct ifaltq_subque *_ifsq)
417 _ifsq->ifsq_started = 0;
420 static __inline struct ifsubq_stage *
421 ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
423 return &_ifsq->ifsq_stage[_cpuid];
427 ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
429 return _ifsq->ifsq_cpuid;
433 ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
435 KASSERT(_cpuid >= 0 && _cpuid < ncpus,
436 ("invalid ifsq_cpuid %d", _cpuid));
437 _ifsq->ifsq_cpuid = _cpuid;
440 static __inline struct lwkt_msg *
441 ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
443 return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
447 ifsq_get_index(const struct ifaltq_subque *_ifsq)
449 return _ifsq->ifsq_index;
453 ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
455 _ifsq->ifsq_hw_priv = _priv;
458 static __inline void *
459 ifsq_get_priv(const struct ifaltq_subque *_ifsq)
461 return _ifsq->ifsq_hw_priv;
464 static __inline struct ifnet *
465 ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
467 return _ifsq->ifsq_ifp;
470 static __inline struct ifaltq_subque *
471 ifq_get_subq_default(const struct ifaltq *_ifq)
473 return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
476 static __inline struct ifaltq_subque *
477 ifq_get_subq(const struct ifaltq *_ifq, int _idx)
479 KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
480 ("invalid qid %d", _idx));
481 return &_ifq->altq_subq[_idx];
484 static __inline struct ifaltq_subque *
485 ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
487 int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);
488 return ifq_get_subq(_ifq, _idx);
493 ifq_is_oactive(const struct ifaltq *_ifq)
495 return ifsq_is_oactive(ifq_get_subq_default(_ifq));
500 ifq_set_oactive(struct ifaltq *_ifq)
502 ifsq_set_oactive(ifq_get_subq_default(_ifq));
507 ifq_clr_oactive(struct ifaltq *_ifq)
509 ifsq_clr_oactive(ifq_get_subq_default(_ifq));
514 ifq_is_empty(struct ifaltq *_ifq)
516 return ifsq_is_empty(ifq_get_subq_default(_ifq));
521 ifq_purge(struct ifaltq *_ifq)
523 ifsq_purge(ifq_get_subq_default(_ifq));
527 static __inline struct mbuf *
528 ifq_dequeue(struct ifaltq *_ifq, struct mbuf *_mpolled)
530 return ifsq_dequeue(ifq_get_subq_default(_ifq), _mpolled);
535 ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
537 ifsq_prepend(ifq_get_subq_default(_ifq), _m);
542 ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
544 KASSERT(_ifq->altq_subq_cnt == 1,
545 ("invalid subqueue count %d", _ifq->altq_subq_cnt));
546 ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
550 #endif /* _NET_IFQ_VAR_H_ */