if: Add power of 2 mask based CPUID to subqueue mapping
[dragonfly.git] / sys / net / altq / if_altq.h
CommitLineData
4d723e5a 1/* $KAME: if_altq.h,v 1.11 2003/07/10 12:07:50 kjc Exp $ */
4d723e5a
JS
2
3/*
4 * Copyright (C) 1997-2003
5 * Sony Computer Science Laboratories Inc. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
1bd40720
MD
28#ifndef _NET_ALTQ_IF_ALTQ_H_
29#define _NET_ALTQ_IF_ALTQ_H_
4d723e5a 30
9db4b353
SZ
31#ifndef _SYS_SERIALIZE_H_
32#include <sys/serialize.h>
33#endif
34
f0a26983
SZ
/* Default subqueue: index 0 is always present and used by legacy code. */
#define ALTQ_SUBQ_INDEX_DEFAULT	0

struct mbuf;
struct altq_pktattr;

struct ifaltq_subque;
struct ifaltq;

/*
 * Map a CPUID to a subqueue index on the given ifaltq
 * (second argument is the cpuid; see altq_mapsubq field below).
 */
typedef int (*altq_mapsubq_t)(struct ifaltq *, int);

/*
 * Per-subqueue discipline operations:
 * enqueue a packet (with classifier attributes), dequeue/poll a packet
 * (third arg is an ALTDQ_ op), and issue a control request (ALTRQ_ op).
 */
typedef int (*ifsq_enqueue_t)(struct ifaltq_subque *, struct mbuf *,
    struct altq_pktattr *);
typedef struct mbuf *(*ifsq_dequeue_t)(struct ifaltq_subque *,
    struct mbuf *, int);
typedef int (*ifsq_request_t)(struct ifaltq_subque *, int, void *);
51
/*
 * Packet staging area attached to a subqueue; used to batch packets
 * before the subqueue is serviced (stg_cnt/stg_len presumably count
 * staged packets and bytes — TODO confirm against the staging code).
 */
struct ifsubq_stage {
	struct ifaltq_subque *stg_subq;	/* subqueue being staged for */
	int		stg_cnt;
	int		stg_len;
	uint32_t	stg_flags;	/* IFSQ_STAGE_FLAG_ */
	TAILQ_ENTRY(ifsubq_stage) stg_link;
} __cachealign;

/* stg_flags */
#define IFSQ_STAGE_FLAG_QUED	0x1
#define IFSQ_STAGE_FLAG_SCHED	0x2
62
/*
 * One transmit subqueue of an ifaltq.  The ifq_-prefixed fields are
 * laid out to stay compatible with the classic IFQ_ macros; everything
 * is serialized by ifsq_lock unless noted otherwise.
 */
struct ifaltq_subque {
	struct lwkt_serialize ifsq_lock;	/* subqueue serializer */
	int		ifsq_index;	/* index within altq_subq[] */

	struct ifaltq	*ifsq_altq;	/* back pointer to parent queue */
	struct ifnet	*ifsq_ifp;	/* back pointer to interface */
	void		*ifsq_hw_priv;	/* hw private data */

	/* fields compatible with IFQ_ macros */
	struct mbuf	*ifq_head;
	struct mbuf	*ifq_tail;
	int		ifq_len;
	int		ifq_maxlen;

	/* discipline ops for this subqueue */
	ifsq_enqueue_t	ifsq_enqueue;
	ifsq_dequeue_t	ifsq_dequeue;
	ifsq_request_t	ifsq_request;

	struct mbuf	*ifsq_prepended;/* mbuf dequeued, but not yet xmit */
	int		ifsq_started;	/* ifnet.if_start interlock */
	int		ifsq_hw_oactive;/* hw too busy, protected by driver */
	int		ifsq_cpuid;	/* owner cpu */
	struct ifsubq_stage *ifsq_stage;/* packet staging information */
	struct netmsg_base *ifsq_ifstart_nmsg;
					/* percpu msgs to sched if_start */
} __cachealign;
89
f0a26983
SZ
#ifdef _KERNEL
/*
 * Subqueue locking: wraps the lwkt serializer embedded in each
 * ifaltq_subque (adaptive enter may spin briefly before blocking).
 */
#define ALTQ_SQ_ASSERT_LOCKED(ifsq)	ASSERT_SERIALIZED(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_LOCK_INIT(ifsq)	lwkt_serialize_init(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_LOCK(ifsq) \
	lwkt_serialize_adaptive_enter(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_UNLOCK(ifsq)	lwkt_serialize_exit(&(ifsq)->ifsq_lock)
#endif
28cc0c29 97
4d723e5a
JS
/*
 * Structure defining a queue for a network interface.
 * Holds the ALTQ discipline state plus an array of per-cpu/per-hw
 * transmit subqueues; altq_mapsubq picks the subqueue for a cpuid.
 */
struct ifaltq {
	/* alternate queueing related fields */
	int	altq_type;		/* discipline type */
	int	altq_flags;		/* flags (e.g. ready, in-use) */
	void	*altq_disc;		/* for discipline-specific use */
	struct ifnet *altq_ifp;		/* back pointer to interface */

	/* classifier fields */
	void	*altq_clfier;		/* classifier-specific use */
	void	*(*altq_classify)(struct ifaltq *, struct mbuf *,
	    struct altq_pktattr *);

	/* token bucket regulator */
	struct tb_regulator *altq_tbr;

	/* Sub-queues mapping */
	altq_mapsubq_t	altq_mapsubq;	/* cpuid -> subqueue index */
	uint32_t	altq_subq_mask;	/* power-of-2 mask for cpuid mapping;
					 * presumably altq_subq_cnt - 1 when
					 * the count is a power of 2 —
					 * TODO confirm against users */

	/* Sub-queues */
	int		altq_subq_cnt;	/* number of entries in altq_subq[] */
	struct ifaltq_subque *altq_subq;

	int		altq_maxlen;	/* default per-subqueue max length */
};
126
f0a26983
SZ
#ifdef _KERNEL	/* was misspelled "_KERNRL", leaving these macros undefined */
/*
 * COMPAT: legacy whole-queue lock macros for code not yet converted to
 * subqueues; they simply lock/unlock the default subqueue.  New code
 * should use ALTQ_SQ_LOCK()/ALTQ_SQ_UNLOCK() on the specific subqueue.
 */
/* COMPAT */
#define ALTQ_LOCK(ifq) \
	ALTQ_SQ_LOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT])
/* COMPAT */
#define ALTQ_UNLOCK(ifq) \
	ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT])
#endif
4d723e5a
JS
135
136#ifdef _KERNEL
137
/*
 * packet attributes used by queueing disciplines.
 * pattr_class is a discipline-dependent scheduling class that is
 * set by a classifier.
 * pattr_hdr and pattr_af may be used by a discipline to access
 * the header within a mbuf.  (e.g. ECN needs to update the CE bit)
 * note that pattr_hdr could be stale after m_pullup, though link
 * layer output routines usually don't use m_pullup.  link-level
 * compression also invalidates these fields.  thus, pattr_hdr needs
 * to be verified when a discipline touches the header.
 */
struct altq_pktattr {
	void	*pattr_class;		/* sched class set by classifier */
	int	pattr_af;		/* address family */
	caddr_t	pattr_hdr;		/* saved header position in mbuf */
};
154
/*
 * a token-bucket regulator limits the rate that a network driver can
 * dequeue packets from the output queue.
 * modern cards are able to buffer a large amount of packets and dequeue
 * too many packets at a time.  this bursty dequeue behavior makes it
 * impossible to schedule packets by queueing disciplines.
 * a token-bucket is used to control the burst size in a device
 * independent manner.
 */
struct tb_regulator {
	int64_t		tbr_rate;	/* (scaled) token bucket rate */
	int64_t		tbr_depth;	/* (scaled) token bucket depth */

	int64_t		tbr_token;	/* (scaled) current token */
	int64_t		tbr_filluptime;	/* (scaled) time to fill up bucket */
	uint64_t	tbr_last;	/* last time token was updated */

	int		tbr_lastop;	/* last dequeue operation type
					   needed for poll-and-dequeue */
};
175
/* if_altqflags (stored in ifaltq.altq_flags) */
#define ALTQF_READY	 0x01	/* driver supports alternate queueing */
#define ALTQF_ENABLED	 0x02	/* altq is in use */
#define ALTQF_CLASSIFY	 0x04	/* classify packets */
#define ALTQF_DRIVER1	 0x40	/* driver specific */

/* if_altqflags set internally only: */
#define ALTQF_CANTCHANGE	(ALTQF_READY)

/* altq_dequeue 2nd arg (also ifsq_dequeue_t's int argument) */
#define ALTDQ_REMOVE		1	/* dequeue mbuf from the queue */
#define ALTDQ_POLL		2	/* don't dequeue mbuf from the queue */

/* altq request types (currently only purge is defined) */
#define ALTRQ_PURGE		1	/* purge all packets */
191
/*
 * Attach a discipline to an ifaltq: type, discipline state, subqueue
 * mapper, per-subqueue enqueue/dequeue/request ops, classifier state
 * and classify function.  Returns 0 or an errno.
 */
int	altq_attach(struct ifaltq *, int, void *, altq_mapsubq_t,
	    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t, void *,
	    void *(*)(struct ifaltq *, struct mbuf *, struct altq_pktattr *));
int	altq_detach(struct ifaltq *);
int	altq_enable(struct ifaltq *);
int	altq_disable(struct ifaltq *);
/* Token-bucket regulated dequeue from a subqueue (ALTDQ_ op in 3rd arg). */
struct mbuf *tbr_dequeue(struct ifaltq_subque *, struct mbuf *, int);
/* Hook for feeding packets back into altq on input, if set. */
extern int (*altq_input)(struct mbuf *, int);
200#endif /* _KERNEL */
201
1bd40720 202#endif /* _NET_ALTQ_IF_ALTQ_H_ */