X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/blobdiff_plain/532773eea27a1126cdcfde7e2cd4925465165e67..2cc2f6391cf953ba02d6c69d8282aed2d8d4caa8:/sys/net/altq/if_altq.h diff --git a/sys/net/altq/if_altq.h b/sys/net/altq/if_altq.h index 9b0da4b5d6..ecaaf31cd6 100644 --- a/sys/net/altq/if_altq.h +++ b/sys/net/altq/if_altq.h @@ -1,5 +1,4 @@ /* $KAME: if_altq.h,v 1.11 2003/07/10 12:07:50 kjc Exp $ */ -/* $DragonFly: src/sys/net/altq/if_altq.h,v 1.4 2008/05/14 11:59:23 sephe Exp $ */ /* * Copyright (C) 1997-2003 @@ -33,30 +32,79 @@ #include #endif +/* Default subqueue */ +#define ALTQ_SUBQ_INDEX_DEFAULT 0 + +struct mbuf; struct altq_pktattr; +struct ifaltq_subque; +struct ifaltq; + +typedef int (*altq_mapsubq_t)(struct ifaltq *, int); + +typedef int (*ifsq_enqueue_t)(struct ifaltq_subque *, struct mbuf *, + struct altq_pktattr *); +typedef struct mbuf *(*ifsq_dequeue_t)(struct ifaltq_subque *, + struct mbuf *, int); +typedef int (*ifsq_request_t)(struct ifaltq_subque *, int, void *); + +struct ifsubq_stage { + struct ifaltq_subque *stg_subq; + int stg_cnt; + int stg_len; + uint32_t stg_flags; + TAILQ_ENTRY(ifsubq_stage) stg_link; +} __cachealign; + +#define IFSQ_STAGE_FLAG_QUED 0x1 +#define IFSQ_STAGE_FLAG_SCHED 0x2 + +struct ifaltq_subque { + struct lwkt_serialize ifsq_lock; + int ifsq_index; + + struct ifaltq *ifsq_altq; + struct ifnet *ifsq_ifp; + void *ifsq_hw_priv; /* hw private data */ + + /* fields compatible with IFQ_ macros */ + struct mbuf *ifq_head; + struct mbuf *ifq_tail; + int ifq_len; + int ifq_maxlen; + + ifsq_enqueue_t ifsq_enqueue; + ifsq_dequeue_t ifsq_dequeue; + ifsq_request_t ifsq_request; + + struct mbuf *ifsq_prepended;/* mbuf dequeued, but not yet xmit */ + int ifsq_started; /* ifnet.if_start interlock */ + int ifsq_hw_oactive;/* hw too busy, protected by driver */ + int ifsq_cpuid; /* owner cpu */ + struct ifsubq_stage *ifsq_stage;/* packet staging information */ + struct netmsg_base *ifsq_ifstart_nmsg; + /* percpu msgs to sched 
if_start */ +} __cachealign; + +#ifdef _KERNEL +#define ALTQ_SQ_ASSERT_LOCKED(ifsq) ASSERT_SERIALIZED(&(ifsq)->ifsq_lock) +#define ALTQ_SQ_LOCK_INIT(ifsq) lwkt_serialize_init(&(ifsq)->ifsq_lock) +#define ALTQ_SQ_LOCK(ifsq) \ + lwkt_serialize_adaptive_enter(&(ifsq)->ifsq_lock) +#define ALTQ_SQ_UNLOCK(ifsq) lwkt_serialize_exit(&(ifsq)->ifsq_lock) +#endif + /* * Structure defining a queue for a network interface. */ struct ifaltq { - /* fields compatible with struct ifqueue */ - struct mbuf *ifq_head; - struct mbuf *ifq_tail; - int ifq_len; - int ifq_maxlen; - int ifq_drops; - /* alternate queueing related fields */ int altq_type; /* discipline type */ int altq_flags; /* flags (e.g. ready, in-use) */ void *altq_disc; /* for discipline-specific use */ struct ifnet *altq_ifp; /* back pointer to interface */ - int (*altq_enqueue)(struct ifaltq *, struct mbuf *, - struct altq_pktattr *); - struct mbuf *(*altq_dequeue)(struct ifaltq *, struct mbuf *, int); - int (*altq_request)(struct ifaltq *, int, void *); - /* classifier fields */ void *altq_clfier; /* classifier-specific use */ void *(*altq_classify)(struct ifaltq *, struct mbuf *, @@ -65,21 +113,24 @@ struct ifaltq { /* token bucket regulator */ struct tb_regulator *altq_tbr; - struct lwkt_serialize altq_lock; - struct mbuf *altq_prepended; /* mbuf dequeued, but not yet xmit */ - int altq_started; /* ifnet.if_start interlock */ + /* Sub-queues mapping */ + altq_mapsubq_t altq_mapsubq; + uint32_t altq_map_unused; + + /* Sub-queues */ + int altq_subq_cnt; + struct ifaltq_subque *altq_subq; + + int altq_maxlen; }; -#ifdef SMP -#define ALTQ_ASSERT_LOCKED(ifq) ASSERT_SERIALIZED(&(ifq)->altq_lock) -#define ALTQ_LOCK_INIT(ifq) lwkt_serialize_init(&(ifq)->altq_lock) -#define ALTQ_LOCK(ifq) lwkt_serialize_adaptive_enter(&(ifq)->altq_lock) -#define ALTQ_UNLOCK(ifq) lwkt_serialize_exit(&(ifq)->altq_lock) -#else -#define ALTQ_ASSERT_LOCKED(ifq) ((void)0) /* XXX */ -#define ALTQ_LOCK_INIT(ifq) ((void)0) -#define ALTQ_LOCK(ifq) 
crit_enter() -#define ALTQ_UNLOCK(ifq) crit_exit() +#ifdef _KERNEL +/* COMPAT */ +#define ALTQ_LOCK(ifq) \ + ALTQ_SQ_LOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT]) +/* COMPAT */ +#define ALTQ_UNLOCK(ifq) \ + ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT]) #endif #ifdef _KERNEL @@ -138,16 +189,13 @@ struct tb_regulator { /* altq request types (currently only purge is defined) */ #define ALTRQ_PURGE 1 /* purge all packets */ -int altq_attach(struct ifaltq *, int, void *, - int (*)(struct ifaltq *, struct mbuf *, struct altq_pktattr *), - struct mbuf *(*)(struct ifaltq *, struct mbuf *, int), - int (*)(struct ifaltq *, int, void *), - void *, void *(*)(struct ifaltq *, struct mbuf *, - struct altq_pktattr *)); +int altq_attach(struct ifaltq *, int, void *, altq_mapsubq_t, + ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t, void *, + void *(*)(struct ifaltq *, struct mbuf *, struct altq_pktattr *)); int altq_detach(struct ifaltq *); int altq_enable(struct ifaltq *); int altq_disable(struct ifaltq *); -struct mbuf *tbr_dequeue(struct ifaltq *, struct mbuf *, int); +struct mbuf *tbr_dequeue(struct ifaltq_subque *, struct mbuf *, int); extern int (*altq_input)(struct mbuf *, int); #endif /* _KERNEL */