igb: Set default RX descriptor count to 512
[dragonfly.git] / sys / dev / netif / igb / if_igb.h
... / ...
CommitLineData
1/*
2 * Copyright (c) 2001-2011, Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32#ifndef _IF_IGB_H_
33#define _IF_IGB_H_
34
/* Tunables */

/*
 * Max ring count (number of TX/RX rings), per MAC type
 */
#define IGB_MAX_RING_82575	4
#define IGB_MAX_RING_I350	8
#define IGB_MAX_RING_82580	8
#define IGB_MAX_RING_82576	16
#define IGB_MIN_RING		1
#define IGB_MIN_RING_RSS	2	/* minimum when RSS is used */

/*
 * Max TX/RX interrupt bits, per MAC type
 */
#define IGB_MAX_TXRXINT_82575	4	/* XXX not used */
#define IGB_MAX_TXRXINT_I350	8
#define IGB_MAX_TXRXINT_82580	8
#define IGB_MAX_TXRXINT_82576	16
#define IGB_MIN_TXRXINT		2	/* XXX VF? */

/*
 * Max IVAR count (interrupt vector allocation registers), per MAC type
 */
#define IGB_MAX_IVAR_I350	4
#define IGB_MAX_IVAR_82580	4
#define IGB_MAX_IVAR_82576	8
#define IGB_MAX_IVAR_VF		1
63
/*
 * IGB_TXD: Maximum number of Transmit Descriptors
 *
 * This value is the number of transmit descriptors allocated by the driver.
 * Increasing this value allows the driver to queue more transmits. Each
 * descriptor is 16 bytes.
 * Since TDLEN should be a multiple of 128 bytes, the number of transmit
 * descriptors should meet the following condition.
 * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 */
#define IGB_MIN_TXD		256
#define IGB_DEFAULT_TXD		1024
#define IGB_MAX_TXD		4096

/*
 * IGB_RXD: Maximum number of Receive Descriptors
 *
 * This value is the number of receive descriptors allocated by the driver.
 * Increasing this value allows the driver to buffer more incoming packets.
 * Each descriptor is 16 bytes. A receive buffer is also allocated for each
 * descriptor. The maximum MTU size is 16110.
 * Since RDLEN should be a multiple of 128 bytes, the number of receive
 * descriptors should meet the following condition.
 * (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
 */
#define IGB_MIN_RXD		256
#define IGB_DEFAULT_RXD		512
#define IGB_MAX_RXD		4096
92
/*
 * This parameter controls when the driver calls the routine to reclaim
 * transmit descriptors. Cleaning earlier seems a win.
 */
#define IGB_TX_CLEANUP_THRESHOLD(sc)	((sc)->num_tx_desc / 2)

/*
 * This parameter controls whether or not autonegotiation is enabled.
 * 0 - Disable autonegotiation
 * 1 - Enable autonegotiation
 */
#define DO_AUTO_NEG		1

/*
 * This parameter controls whether or not the driver will wait for
 * autonegotiation to complete.
 * 1 - Wait for autonegotiation to complete
 * 0 - Don't wait for autonegotiation to complete
 */
#define WAIT_FOR_AUTO_NEG_DEFAULT	0

/* Tunables -- End */
115
/* Link modes advertised during autonegotiation by default */
#define AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
				 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
				 ADVERTISE_1000_FULL)

#define AUTO_ALL_MODES		0

/* PHY master/slave setting */
#define IGB_MASTER_SLAVE	e1000_ms_hw_default

/*
 * Miscellaneous constants
 */
#define IGB_VENDOR_ID		0x8086

#define IGB_JUMBO_PBA		0x00000028
#define IGB_DEFAULT_PBA		0x00000030
#define IGB_SMARTSPEED_DOWNSHIFT	3
#define IGB_SMARTSPEED_MAX	15
#define IGB_MAX_LOOP		10

/* RX descriptor control thresholds (prefetch/host/write-back) */
#define IGB_RX_PTHRESH		(hw->mac.type <= e1000_82576 ? 16 : 8)
#define IGB_RX_HTHRESH		8
#define IGB_RX_WTHRESH		1

/* TX descriptor control thresholds (prefetch/host/write-back) */
#define IGB_TX_PTHRESH		8
#define IGB_TX_HTHRESH		1
#define IGB_TX_WTHRESH		16

#define MAX_NUM_MULTICAST_ADDRESSES	128
#define IGB_FC_PAUSE_TIME	0x0680	/* flow control pause time */

/* Default interrupt rates */
#define IGB_INTR_RATE		6000
#define IGB_MSIX_RX_RATE	6000
#define IGB_MSIX_TX_RATE	4000

/*
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
 * also optimize cache line size effect. H/W supports up to cache line size 128.
 */
#define IGB_DBA_ALIGN		128
157
/* PCI Config defines */
#define IGB_MSIX_BAR		3

#define IGB_MAX_SCATTER		64	/* max DMA segments per packet */
#define IGB_VFTA_SIZE		128
#define IGB_TSO_SIZE		(IP_MAXPACKET + \
				 sizeof(struct ether_vlan_header))
#define IGB_HDR_BUF		128
#define IGB_PKTTYPE_MASK	0x0000FFF0

#define IGB_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define IGB_IPVHL_SIZE		1 /* sizeof(ip.ip_vhl) */
/* Minimum header length required for TX checksum offload */
#define IGB_TXCSUM_MINHL	(ETHER_HDR_LEN + EVL_ENCAPLEN + \
				 IGB_IPVHL_SIZE)

/* One for TX csum offloading desc, the other 2 are reserved */
#define IGB_TX_RESERVED		3

/* Large enough for 64K TSO */
#define IGB_TX_SPARE		33

#define IGB_TX_OACTIVE_MAX	64

/* main + 16x RX + 16x TX */
#define IGB_NSERIALIZE		33

/* RSS hash key: IGB_NRSSRK registers of IGB_RSSRK_SIZE bytes each */
#define IGB_NRSSRK		10
#define IGB_RSSRK_SIZE		4
/* Pack 4 consecutive key bytes into one little-endian RSSRK register value */
#define IGB_RSSRK_VAL(key, i)	(key[(i) * IGB_RSSRK_SIZE] | \
				 key[(i) * IGB_RSSRK_SIZE + 1] << 8 | \
				 key[(i) * IGB_RSSRK_SIZE + 2] << 16 | \
				 key[(i) * IGB_RSSRK_SIZE + 3] << 24)

/* RSS redirection table: IGB_NRETA registers of IGB_RETA_SIZE entries each */
#define IGB_NRETA		32
#define IGB_RETA_SIZE		4
#define IGB_RETA_SHIFT		0
#define IGB_RETA_SHIFT_82575	6

/* EITR interrupt throttling interval field */
#define IGB_EITR_INTVL_MASK	0x7ffc
#define IGB_EITR_INTVL_SHIFT	2

struct igb_softc;
200
/*
 * Bus dma information structure
 */
struct igb_dma {
	bus_addr_t		dma_paddr;	/* bus/physical address of the mapping */
	void			*dma_vaddr;	/* kernel virtual address of the mapping */
	bus_dma_tag_t		dma_tag;	/* tag describing the DMA constraints */
	bus_dmamap_t		dma_map;	/* map backing the allocation */
};
210
/*
 * Transmit ring: one per queue
 */
struct igb_tx_ring {
	struct lwkt_serialize	tx_serialize;	/* protects this ring */
	struct igb_softc	*sc;		/* back pointer to the softc */
	uint32_t		me;		/* ring index */
	struct igb_dma		txdma;		/* descriptor ring DMA memory */
	bus_dma_tag_t		tx_hdr_dtag;	/* tag/map/paddr for tx_hdr below */
	bus_dmamap_t		tx_hdr_dmap;
	bus_addr_t		tx_hdr_paddr;
	struct e1000_tx_desc	*tx_base;	/* descriptor ring virtual address */
	int			num_tx_desc;	/* # of descriptors in the ring */
	uint32_t		next_avail_desc;
	uint32_t		next_to_clean;
	uint32_t		*tx_hdr;	/* NOTE(review): presumably the TDH
						 * write-back area — confirm in if_igb.c */
	int			tx_avail;	/* # of free descriptors */
	struct igb_tx_buf	*tx_buf;	/* per-descriptor software state */
	bus_dma_tag_t		tx_tag;		/* tag for packet buffer maps */
	int			tx_nsegs;
	int			spare_desc;
	int			oact_lo_desc;	/* OACTIVE low watermark, see IGB_IS_OACTIVE() */
	int			oact_hi_desc;	/* OACTIVE high watermark, see IGB_IS_NOT_OACTIVE() */
	int			intr_nsegs;
	int			tx_intr_bit;	/* EICR bit for this ring's interrupt */
	uint32_t		tx_intr_mask;

	/* Soft stats */
	u_long			no_desc_avail;
	u_long			tx_packets;
};
241
/*
 * Receive ring: one per queue
 */
struct igb_rx_ring {
	struct lwkt_serialize	rx_serialize;	/* protects this ring */
	struct igb_softc	*sc;		/* back pointer to the softc */
	uint32_t		me;		/* ring index */
	struct igb_dma		rxdma;		/* descriptor ring DMA memory */
	union e1000_adv_rx_desc	*rx_base;	/* descriptor ring virtual address */
	boolean_t		discard;	/* NOTE(review): presumably set to drop
						 * the rest of a bad frame — confirm */
	int			num_rx_desc;	/* # of descriptors in the ring */
	uint32_t		next_to_check;
	struct igb_rx_buf	*rx_buf;	/* per-descriptor software state */
	bus_dma_tag_t		rx_tag;		/* tag for packet buffer maps */
	bus_dmamap_t		rx_sparemap;	/* spare map used when swapping mbufs */
	int			rx_intr_bit;	/* EICR bit for this ring's interrupt */
	uint32_t		rx_intr_mask;

	/*
	 * First/last mbuf pointers, for
	 * collecting multisegment RX packets.
	 */
	struct mbuf		*fmp;
	struct mbuf		*lmp;

	/* Soft stats */
	u_long			rx_packets;
};
270
/* Per-MSI-X-vector state */
struct igb_msix_data {
	struct lwkt_serialize	*msix_serialize;	/* serializer used by this vector */
	struct lwkt_serialize	msix_serialize0;	/* backing storage for the above */
	struct igb_softc	*msix_sc;		/* back pointer to the softc */
	uint32_t		msix_mask;		/* interrupt mask bits for this vector */
	struct igb_rx_ring	*msix_rx;		/* RX ring serviced by this vector */
	struct igb_tx_ring	*msix_tx;		/* TX ring serviced by this vector */

	driver_intr_t		*msix_func;		/* interrupt handler */
	void			*msix_arg;		/* handler argument */

	int			msix_cpuid;		/* CPU this vector is targeted at */
	char			msix_desc[32];		/* handler description string */
	int			msix_rid;		/* resource id of the IRQ */
	struct resource		*msix_res;		/* IRQ resource */
	void			*msix_handle;		/* interrupt handler cookie */
	u_int			msix_vector;		/* hardware vector number */
	int			msix_rate;		/* interrupt rate for this vector */
	char			msix_rate_desc[32];	/* sysctl description string */
};
291
/* Per-device driver state */
struct igb_softc {
	struct arpcom		arpcom;		/* common ethernet/interface state */
	struct e1000_hw		hw;		/* shared e1000 hardware layer state */

	struct e1000_osdep	osdep;		/* OS-dependent glue for the shared code */
	device_t		dev;
	uint32_t		flags;
#define IGB_FLAG_SHARED_INTR	0x1	/* interrupt line is shared */
#define IGB_FLAG_HAS_MGMT	0x2	/* manageability firmware present */
#define IGB_FLAG_TSO_IPLEN0	0x4	/* zero the IP length field for TSO */

	bus_dma_tag_t		parent_tag;	/* parent of all per-ring DMA tags */

	/* Register memory resource */
	int			mem_rid;
	struct resource 	*mem_res;

	struct ifmedia		media;
	struct callout		timer;		/* periodic timer (link/watchdog) */

	/* Interrupt resource (legacy/MSI path) */
	int			intr_type;
	int			intr_rid;
	struct resource		*intr_res;
	void			*intr_tag;

	int			if_flags;	/* saved copy of ifp->if_flags */
	int			max_frame_size;
	int			pause_frames;
	uint16_t		vf_ifp;	/* a VF interface */

	/* Management and WOL features */
	int			wol;

	/* Info about the interface */
	uint8_t			link_active;
	uint16_t		link_speed;
	uint16_t		link_duplex;
	uint32_t		smartspeed;
	uint32_t		dma_coalesce;

	/* Multicast array pointer */
	uint8_t			*mta;

	/*
	 * Serializer array: main serializer plus the per-ring TX/RX
	 * serializers (IGB_NSERIALIZE entries at most).
	 */
	int			serialize_cnt;	/* # of valid entries in serializes[] */
	int			tx_serialize;	/* index of first TX serializer */
	int			rx_serialize;	/* index of first RX serializer */
	struct lwkt_serialize	*serializes[IGB_NSERIALIZE];
	struct lwkt_serialize	main_serialize;

	int			intr_rate;	/* interrupt rate (legacy/MSI path) */
	uint32_t		intr_mask;
	int			sts_intr_bit;	/* status (link) interrupt bit */
	uint32_t		sts_intr_mask;

	/*
	 * Transmit rings
	 */
	int			tx_ring_cnt;
	struct igb_tx_ring	*tx_rings;

	/*
	 * Receive rings
	 */
	int			rss_debug;	/* enable per-ring debug sysctls */
	int			rx_ring_cnt;	/* # of RX rings allocated */
	int			rx_ring_msix;	/* # of RX rings usable w/ MSI-X */
	int			rx_ring_inuse;	/* # of RX rings currently in use */
	struct igb_rx_ring	*rx_rings;

	/* Misc stats maintained by the driver */
	u_long			dropped_pkts;
	u_long			mbuf_defrag_failed;
	u_long			no_tx_dma_setup;
	u_long			watchdog_events;
	u_long			rx_overruns;
	u_long			device_control;
	u_long			rx_control;
	u_long			int_mask;
	u_long			eint_mask;
	u_long			packet_buf_alloc_rx;
	u_long			packet_buf_alloc_tx;

	/* sysctl tree glue */
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;

	void			*stats;		/* hardware statistics block */

	/* MSI-X state */
	int			msix_tx_cpuid;
	int			msix_mem_rid;
	struct resource 	*msix_mem_res;
	int			msix_cnt;	/* # of entries in msix_data[] */
	struct igb_msix_data	*msix_data;
};
385
/* Hardware RSS is enabled whenever more than one RX ring is configured */
#define IGB_ENABLE_HWRSS(sc)	((sc)->rx_ring_cnt > 1)

/* Per-descriptor TX software state */
struct igb_tx_buf {
	struct mbuf	*m_head;	/* first mbuf of the queued packet */
	bus_dmamap_t	map;		/* bus_dma map for packet */
};

/* Per-descriptor RX software state */
struct igb_rx_buf {
	struct mbuf	*m_head;	/* mbuf backing this descriptor */
	bus_dmamap_t	map;		/* bus_dma map for packet */
	bus_addr_t	paddr;		/* bus address of the mbuf data */
};
398
/*
 * Fold the current value of a 32-bit VF statistics register into a 64-bit
 * soft counter.  The hardware register is read (NOTE: `hw` must be in scope
 * at the expansion site), and if the new 32-bit value is smaller than the
 * previously seen one the counter is assumed to have wrapped, so 2^32 is
 * added to the 64-bit accumulator.  The low 32 bits of `cur` always mirror
 * the latest register value; `last` caches it for the next wrap check.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and stays safe inside unbraced if/else bodies.  The local is named
 * `cur32` (not `new`) to keep the header usable from C++ units.
 */
#define UPDATE_VF_REG(reg, last, cur)				\
do {								\
	uint32_t cur32 = E1000_READ_REG(hw, reg);		\
	if (cur32 < last)	/* 32-bit counter wrapped */	\
		cur += 0x100000000LL;				\
	last = cur32;						\
	cur &= 0xFFFFFFFF00000000LL;				\
	cur |= cur32;						\
} while (0)

/* TX ring hysteresis: OACTIVE below the low watermark, cleared at the high one */
#define IGB_IS_OACTIVE(txr)	((txr)->tx_avail < (txr)->oact_lo_desc)
#define IGB_IS_NOT_OACTIVE(txr)	((txr)->tx_avail >= (txr)->oact_hi_desc)
411
412#endif /* _IF_IGB_H_ */