2 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifndef _NET_IFQ_VAR_H_
33 #define _NET_IFQ_VAR_H_
37 #error "This file should not be included by userland programs."
42 #include <sys/systm.h>
44 #ifndef _SYS_THREAD2_H_
45 #include <sys/thread2.h>
47 #ifndef _SYS_SERIALIZE_H_
48 #include <sys/serialize.h>
53 #ifndef _NET_IF_VAR_H_
54 #include <net/if_var.h>
56 #ifndef _NET_ALTQ_IF_ALTQ_H_
57 #include <net/altq/if_altq.h>
/*
 * Assert that the subqueue is its interface's default (index 0) subqueue.
 * NOTE: no trailing semicolon inside the macro -- callers supply their own,
 * so the macro stays usable in un-braced if/else bodies.
 */
#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"))
68 * Support for "classic" ALTQ interfaces.
70 int ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
71 struct altq_pktattr *);
72 struct mbuf *ifsq_classic_dequeue(struct ifaltq_subque *, struct mbuf *,
74 int ifsq_classic_request(struct ifaltq_subque *, int, void *);
75 void ifq_set_classic(struct ifaltq *);
77 void ifq_set_maxlen(struct ifaltq *, int);
78 void ifq_set_methods(struct ifaltq *, ifsq_enqueue_t,
79 ifsq_dequeue_t, ifsq_request_t);
81 void ifsq_devstart(struct ifaltq_subque *ifsq);
82 void ifsq_devstart_sched(struct ifaltq_subque *ifsq);
85 * Dispatch a packet to an interface.
87 int ifq_dispatch(struct ifnet *, struct mbuf *,
88 struct altq_pktattr *);
#ifdef ALTQ
/*
 * Non-zero iff an ALTQ discipline is enabled on this queue.
 */
static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

/*
 * Non-zero iff an ALTQ discipline is attached (altq_disc set), whether
 * or not it is currently enabled.
 */
static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}
#else	/* !ALTQ */
/* Without ALTQ compiled in, no discipline can ever be enabled/attached. */
static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return 0;
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return 0;
}
#endif	/* ALTQ */
121 ifq_is_ready(struct ifaltq *_ifq)
123 return(_ifq->altq_flags & ALTQF_READY);
127 ifq_set_ready(struct ifaltq *_ifq)
129 _ifq->altq_flags |= ALTQF_READY;
133 * ALTQ lock must be held
136 ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
137 struct altq_pktattr *_pa)
140 if (!ifq_is_enabled(_ifsq->ifsq_altq))
141 return ifsq_classic_enqueue(_ifsq, _m, _pa);
144 return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
/*
 * Locked wrapper around ifsq_enqueue_locked(); returns its error code.
 */
static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}
159 static __inline struct mbuf *
160 ifsq_dequeue(struct ifaltq_subque *_ifsq, struct mbuf *_mpolled)
165 if (_ifsq->ifsq_prepended != NULL) {
166 _m = _ifsq->ifsq_prepended;
167 _ifsq->ifsq_prepended = NULL;
168 KKASSERT(_ifsq->ifq_len > 0);
170 ALTQ_SQ_UNLOCK(_ifsq);
175 if (_ifsq->ifsq_altq->altq_tbr != NULL)
176 _m = tbr_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
177 else if (!ifq_is_enabled(_ifsq->ifsq_altq))
178 _m = ifsq_classic_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
181 _m = _ifsq->ifsq_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
182 ALTQ_SQ_UNLOCK(_ifsq);
187 * ALTQ lock must be held
189 static __inline struct mbuf *
190 ifsq_poll_locked(struct ifaltq_subque *_ifsq)
192 if (_ifsq->ifsq_prepended != NULL)
193 return _ifsq->ifsq_prepended;
196 if (_ifsq->ifsq_altq->altq_tbr != NULL)
197 return tbr_dequeue(_ifsq, NULL, ALTDQ_POLL);
198 else if (!ifq_is_enabled(_ifsq->ifsq_altq))
199 return ifsq_classic_dequeue(_ifsq, NULL, ALTDQ_POLL);
202 return _ifsq->ifsq_dequeue(_ifsq, NULL, ALTDQ_POLL);
/*
 * Locked wrapper around ifsq_poll_locked().
 */
static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}
217 * ALTQ lock must be held
220 ifsq_purge_locked(struct ifaltq_subque *_ifsq)
222 if (_ifsq->ifsq_prepended != NULL) {
223 m_freem(_ifsq->ifsq_prepended);
224 _ifsq->ifsq_prepended = NULL;
225 KKASSERT(_ifsq->ifq_len > 0);
230 if (!ifq_is_enabled(_ifsq->ifsq_altq))
231 ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
234 _ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
/*
 * Locked wrapper around ifsq_purge_locked().
 */
static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}
246 ifq_lock_all(struct ifaltq *_ifq)
250 for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
251 ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
255 ifq_unlock_all(struct ifaltq *_ifq)
259 for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
260 ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
264 * ALTQ lock must be held
267 ifq_purge_all_locked(struct ifaltq *_ifq)
271 for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
272 ifsq_purge_locked(&_ifq->altq_subq[_q]);
276 ifq_purge_all(struct ifaltq *_ifq)
279 ifq_purge_all_locked(_ifq);
280 ifq_unlock_all(_ifq);
284 ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
285 struct altq_pktattr *_pa)
288 if (ifq_is_enabled(_ifq)) {
290 _pa->pattr_hdr = mtod(_m, caddr_t);
291 if (ifq_is_enabled(_ifq) &&
292 (_ifq->altq_flags & ALTQF_CLASSIFY)) {
293 /* XXX default subqueue */
294 struct ifaltq_subque *_ifsq =
295 &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
298 if (ifq_is_enabled(_ifq) &&
299 (_ifq->altq_flags & ALTQF_CLASSIFY))
300 _ifq->altq_classify(_ifq, _m, _pa);
301 ALTQ_SQ_UNLOCK(_ifsq);
308 ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
311 KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
312 _ifsq->ifsq_prepended = _m;
314 ALTQ_SQ_UNLOCK(_ifsq);
318 * Interface TX serializer must be held
321 ifsq_set_oactive(struct ifaltq_subque *_ifsq)
323 _ifsq->ifsq_hw_oactive = 1;
327 * Interface TX serializer must be held
330 ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
332 _ifsq->ifsq_hw_oactive = 0;
336 * Interface TX serializer must be held
339 ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
341 return _ifsq->ifsq_hw_oactive;
345 * Hand a packet to an interface.
347 * Interface TX serializer must be held. If the interface TX
348 * serializer is not held yet, ifq_dispatch() should be used
349 * to get better performance.
352 ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
354 struct ifaltq_subque *_ifsq;
356 int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */
358 _ifsq = &_ifp->if_snd.altq_subq[_qid];
360 ASSERT_IFNET_SERIALIZED_TX(_ifp, _ifsq);
361 _error = ifsq_enqueue(_ifsq, _m, _pa);
363 _ifp->if_obytes += _m->m_pkthdr.len;
364 if (_m->m_flags & M_MCAST)
366 if (!ifsq_is_oactive(_ifsq))
367 (*_ifp->if_start)(_ifp, _ifsq);
373 ifsq_is_empty(const struct ifaltq_subque *_ifsq)
375 return(_ifsq->ifq_len == 0);
379 * ALTQ lock must be held
382 ifsq_data_ready(struct ifaltq_subque *_ifsq)
385 if (_ifsq->ifsq_altq->altq_tbr != NULL)
386 return (ifsq_poll_locked(_ifsq) != NULL);
389 return !ifsq_is_empty(_ifsq);
393 * ALTQ lock must be held
396 ifsq_is_started(const struct ifaltq_subque *_ifsq)
398 return _ifsq->ifsq_started;
402 * ALTQ lock must be held
405 ifsq_set_started(struct ifaltq_subque *_ifsq)
407 _ifsq->ifsq_started = 1;
411 * ALTQ lock must be held
414 ifsq_clr_started(struct ifaltq_subque *_ifsq)
416 _ifsq->ifsq_started = 0;
419 static __inline struct ifsubq_stage *
420 ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
422 return &_ifsq->ifsq_stage[_cpuid];
426 ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
428 return _ifsq->ifsq_cpuid;
432 ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
434 KASSERT(_cpuid >= 0 && _cpuid < ncpus,
435 ("invalid ifsq_cpuid %d", _cpuid));
436 _ifsq->ifsq_cpuid = _cpuid;
439 static __inline struct lwkt_msg *
440 ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
442 return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
446 ifsq_get_index(const struct ifaltq_subque *_ifsq)
448 return _ifsq->ifsq_index;
452 ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
454 _ifsq->ifsq_hw_priv = _priv;
457 static __inline void *
458 ifsq_get_priv(const struct ifaltq_subque *_ifsq)
460 return _ifsq->ifsq_hw_priv;
463 static __inline struct ifnet *
464 ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
466 return _ifsq->ifsq_ifp;
469 static __inline struct ifaltq_subque *
470 ifq_get_subq_default(const struct ifaltq *_ifq)
472 return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
475 static __inline struct ifaltq_subque *
476 ifq_get_subq(const struct ifaltq *_ifq, int _idx)
478 KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
479 ("invalid qid %d", _idx));
480 return &_ifq->altq_subq[_idx];
485 ifq_is_oactive(const struct ifaltq *_ifq)
487 return ifsq_is_oactive(ifq_get_subq_default(_ifq));
492 ifq_set_oactive(struct ifaltq *_ifq)
494 ifsq_set_oactive(ifq_get_subq_default(_ifq));
499 ifq_clr_oactive(struct ifaltq *_ifq)
501 ifsq_clr_oactive(ifq_get_subq_default(_ifq));
506 ifq_is_empty(struct ifaltq *_ifq)
508 return ifsq_is_empty(ifq_get_subq_default(_ifq));
513 ifq_purge(struct ifaltq *_ifq)
515 ifsq_purge(ifq_get_subq_default(_ifq));
519 static __inline struct mbuf *
520 ifq_dequeue(struct ifaltq *_ifq, struct mbuf *_mpolled)
522 return ifsq_dequeue(ifq_get_subq_default(_ifq), _mpolled);
527 ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
529 ifsq_prepend(ifq_get_subq_default(_ifq), _m);
534 ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
536 KASSERT(_ifq->altq_subq_cnt == 1,
537 ("invalid subqueue count %d", _ifq->altq_subq_cnt));
538 ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
542 #endif /* _NET_IFQ_VAR_H_ */