1 /******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.8 2012/07/05 20:51:44 jfv Exp $*/
35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
42 ixgbe_link_speed speed,
44 bool autoneg_wait_to_complete);
45 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
46 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
47 u16 offset, u16 *data);
48 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
49 u16 words, u16 *data);
51 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
53 struct ixgbe_mac_info *mac = &hw->mac;
55 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
57 /* enable the laser control functions for SFP+ fiber */
58 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
59 mac->ops.disable_tx_laser =
60 &ixgbe_disable_tx_laser_multispeed_fiber;
61 mac->ops.enable_tx_laser =
62 &ixgbe_enable_tx_laser_multispeed_fiber;
63 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
66 mac->ops.disable_tx_laser = NULL;
67 mac->ops.enable_tx_laser = NULL;
68 mac->ops.flap_tx_laser = NULL;
71 if (hw->phy.multispeed_fiber) {
72 /* Set up dual speed SFP+ support */
73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
75 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
76 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
77 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
78 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
79 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
81 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
87 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
88 * @hw: pointer to hardware structure
90 * Initialize any function pointers that were not able to be
91 * set during init_shared_code because the PHY/SFP type was
92 * not known. Perform the SFP init if necessary.
95 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
97 struct ixgbe_mac_info *mac = &hw->mac;
98 struct ixgbe_phy_info *phy = &hw->phy;
99 s32 ret_val = IXGBE_SUCCESS;
101 DEBUGFUNC("ixgbe_init_phy_ops_82599");
103 /* Identify the PHY or SFP module */
104 ret_val = phy->ops.identify(hw);
105 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
106 goto init_phy_ops_out;
108 /* Setup function pointers based on detected SFP module and speeds */
109 ixgbe_init_mac_link_ops_82599(hw);
110 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
111 hw->phy.ops.reset = NULL;
113 /* If copper media, overwrite with copper function pointers */
114 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
115 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
116 mac->ops.get_link_capabilities =
117 &ixgbe_get_copper_link_capabilities_generic;
120 /* Set necessary function pointers based on phy type */
121 switch (hw->phy.type) {
123 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
124 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
125 phy->ops.get_firmware_version =
126 &ixgbe_get_phy_firmware_version_tnx;
135 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
137 s32 ret_val = IXGBE_SUCCESS;
140 u16 list_offset, data_offset, data_value;
142 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
144 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
145 ixgbe_init_mac_link_ops_82599(hw);
147 hw->phy.ops.reset = NULL;
149 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
151 if (ret_val != IXGBE_SUCCESS)
154 /* PHY config will finish before releasing the semaphore */
155 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
156 IXGBE_GSSR_MAC_CSR_SM);
157 if (ret_val != IXGBE_SUCCESS) {
158 ret_val = IXGBE_ERR_SWFW_SYNC;
162 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
163 while (data_value != 0xffff) {
164 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
165 IXGBE_WRITE_FLUSH(hw);
166 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
169 /* Release the semaphore */
170 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
171 /* Delay obtaining semaphore again to allow FW access */
172 msec_delay(hw->eeprom.semaphore_delay);
174 /* Now restart DSP by setting Restart_AN and clearing LMS */
175 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
176 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
177 IXGBE_AUTOC_AN_RESTART));
179 /* Wait for AN to leave state 0 */
180 for (i = 0; i < 10; i++) {
182 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
183 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
186 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
187 DEBUGOUT("sfp module setup not complete\n");
188 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
192 /* Restart DSP by setting Restart_AN and return to SFI mode */
193 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
194 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
195 IXGBE_AUTOC_AN_RESTART));
203 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
204 * @hw: pointer to hardware structure
206 * Initialize the function pointers and assign the MAC type for 82599.
207 * Does not touch the hardware.
210 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
212 struct ixgbe_mac_info *mac = &hw->mac;
213 struct ixgbe_phy_info *phy = &hw->phy;
214 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
217 DEBUGFUNC("ixgbe_init_ops_82599");
219 ret_val = ixgbe_init_phy_ops_generic(hw);
220 ret_val = ixgbe_init_ops_generic(hw);
223 phy->ops.identify = &ixgbe_identify_phy_82599;
224 phy->ops.init = &ixgbe_init_phy_ops_82599;
227 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
228 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
229 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
230 mac->ops.get_supported_physical_layer =
231 &ixgbe_get_supported_physical_layer_82599;
232 mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
233 mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
234 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
235 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
236 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
237 mac->ops.start_hw = &ixgbe_start_hw_82599;
238 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
239 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
240 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
241 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
242 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
244 /* RAR, Multicast, VLAN */
245 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
246 mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
247 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
248 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
249 mac->rar_highwater = 1;
250 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
251 mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
252 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
253 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
254 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
255 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
256 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
259 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
260 mac->ops.check_link = &ixgbe_check_mac_link_generic;
261 mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
262 ixgbe_init_mac_link_ops_82599(hw);
264 mac->mcft_size = 128;
266 mac->num_rar_entries = 128;
267 mac->rx_pb_size = 512;
268 mac->max_tx_queues = 128;
269 mac->max_rx_queues = 128;
270 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
272 mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
273 IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
275 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
278 eeprom->ops.read = &ixgbe_read_eeprom_82599;
279 eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
281 /* Manageability interface */
282 mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
289 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
290 * @hw: pointer to hardware structure
291 * @speed: pointer to link speed
292 * @negotiation: TRUE when autoneg or autotry is enabled
294 * Determines the link capabilities by reading the AUTOC register.
296 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
297 ixgbe_link_speed *speed,
300 s32 status = IXGBE_SUCCESS;
303 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
306 /* Check if 1G SFP module. */
307 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
308 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
309 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
310 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
311 *speed = IXGBE_LINK_SPEED_1GB_FULL;
317 * Determine link capabilities based on the stored value of AUTOC,
318 * which represents EEPROM defaults. If AUTOC value has not
319 * been stored, use the current register values.
321 if (hw->mac.orig_link_settings_stored)
322 autoc = hw->mac.orig_autoc;
324 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
326 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
327 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
328 *speed = IXGBE_LINK_SPEED_1GB_FULL;
329 *negotiation = FALSE;
332 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
333 *speed = IXGBE_LINK_SPEED_10GB_FULL;
334 *negotiation = FALSE;
337 case IXGBE_AUTOC_LMS_1G_AN:
338 *speed = IXGBE_LINK_SPEED_1GB_FULL;
342 case IXGBE_AUTOC_LMS_10G_SERIAL:
343 *speed = IXGBE_LINK_SPEED_10GB_FULL;
344 *negotiation = FALSE;
347 case IXGBE_AUTOC_LMS_KX4_KX_KR:
348 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
349 *speed = IXGBE_LINK_SPEED_UNKNOWN;
350 if (autoc & IXGBE_AUTOC_KR_SUPP)
351 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
352 if (autoc & IXGBE_AUTOC_KX4_SUPP)
353 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
354 if (autoc & IXGBE_AUTOC_KX_SUPP)
355 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
359 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
360 *speed = IXGBE_LINK_SPEED_100_FULL;
361 if (autoc & IXGBE_AUTOC_KR_SUPP)
362 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
363 if (autoc & IXGBE_AUTOC_KX4_SUPP)
364 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
365 if (autoc & IXGBE_AUTOC_KX_SUPP)
366 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
370 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
371 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
372 *negotiation = FALSE;
376 status = IXGBE_ERR_LINK_SETUP;
381 if (hw->phy.multispeed_fiber) {
382 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
383 IXGBE_LINK_SPEED_1GB_FULL;
392 * ixgbe_get_media_type_82599 - Get media type
393 * @hw: pointer to hardware structure
395 * Returns the media type (fiber, copper, backplane)
397 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
399 enum ixgbe_media_type media_type;
401 DEBUGFUNC("ixgbe_get_media_type_82599");
403 /* Detect if there is a copper PHY attached. */
404 switch (hw->phy.type) {
405 case ixgbe_phy_cu_unknown:
407 media_type = ixgbe_media_type_copper;
413 switch (hw->device_id) {
414 case IXGBE_DEV_ID_82599_KX4:
415 case IXGBE_DEV_ID_82599_KX4_MEZZ:
416 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
417 case IXGBE_DEV_ID_82599_KR:
418 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
419 case IXGBE_DEV_ID_82599_XAUI_LOM:
420 /* Default device ID is mezzanine card KX/KX4 */
421 media_type = ixgbe_media_type_backplane;
423 case IXGBE_DEV_ID_82599_SFP:
424 case IXGBE_DEV_ID_82599_SFP_FCOE:
425 case IXGBE_DEV_ID_82599_SFP_EM:
426 case IXGBE_DEV_ID_82599_SFP_SF2:
427 case IXGBE_DEV_ID_82599EN_SFP:
428 media_type = ixgbe_media_type_fiber;
430 case IXGBE_DEV_ID_82599_CX4:
431 media_type = ixgbe_media_type_cx4;
433 case IXGBE_DEV_ID_82599_T3_LOM:
434 media_type = ixgbe_media_type_copper;
437 media_type = ixgbe_media_type_unknown;
445 * ixgbe_start_mac_link_82599 - Setup MAC link settings
446 * @hw: pointer to hardware structure
447 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
449 * Configures link settings based on values in the ixgbe_hw struct.
450 * Restarts the link. Performs autonegotiation if needed.
452 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
453 bool autoneg_wait_to_complete)
458 s32 status = IXGBE_SUCCESS;
460 DEBUGFUNC("ixgbe_start_mac_link_82599");
464 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
465 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
466 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
468 /* Only poll for autoneg to complete if specified to do so */
469 if (autoneg_wait_to_complete) {
470 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
471 IXGBE_AUTOC_LMS_KX4_KX_KR ||
472 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
473 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
474 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
475 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
476 links_reg = 0; /* Just in case Autoneg time = 0 */
477 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
478 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
479 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
483 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
484 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
485 DEBUGOUT("Autoneg did not complete.\n");
490 /* Add delay to filter out noises during initial link setup */
497 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
498 * @hw: pointer to hardware structure
500 * The base drivers may require better control over SFP+ module
501 * PHY states. This includes selectively shutting down the Tx
502 * laser on the PHY, effectively halting physical link.
504 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
506 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
508 /* Disable tx laser; allow 100us to go dark per spec */
509 esdp_reg |= IXGBE_ESDP_SDP3;
510 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
511 IXGBE_WRITE_FLUSH(hw);
516 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
517 * @hw: pointer to hardware structure
519 * The base drivers may require better control over SFP+ module
520 * PHY states. This includes selectively turning on the Tx
521 * laser on the PHY, effectively starting physical link.
523 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
525 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
527 /* Enable tx laser; allow 100ms to light up */
528 esdp_reg &= ~IXGBE_ESDP_SDP3;
529 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
530 IXGBE_WRITE_FLUSH(hw);
535 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
536 * @hw: pointer to hardware structure
538 * When the driver changes the link speeds that it can support,
539 * it sets autotry_restart to TRUE to indicate that we need to
540 * initiate a new autotry session with the link partner. To do
541 * so, we set the speed then disable and re-enable the tx laser, to
542 * alert the link partner that it also needs to restart autotry on its
543 * end. This is consistent with TRUE clause 37 autoneg, which also
544 * involves a loss of signal.
546 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
548 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
550 if (hw->mac.autotry_restart) {
551 ixgbe_disable_tx_laser_multispeed_fiber(hw);
552 ixgbe_enable_tx_laser_multispeed_fiber(hw);
553 hw->mac.autotry_restart = FALSE;
558 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
559 * @hw: pointer to hardware structure
560 * @speed: new link speed
561 * @autoneg: TRUE if autonegotiation enabled
562 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
564 * Set the link speed in the AUTOC register and restarts link.
566 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
567 ixgbe_link_speed speed, bool autoneg,
568 bool autoneg_wait_to_complete)
570 s32 status = IXGBE_SUCCESS;
571 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
572 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
574 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
576 bool link_up = FALSE;
579 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
581 /* Mask off requested but non-supported speeds */
582 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
583 if (status != IXGBE_SUCCESS)
589 * Try each speed one by one, highest priority first. We do this in
590 * software because 10gb fiber doesn't support speed autonegotiation.
592 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
594 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
596 /* If we already have link at this speed, just jump out */
597 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
598 if (status != IXGBE_SUCCESS)
601 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
604 /* Set the module link speed */
605 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
606 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
607 IXGBE_WRITE_FLUSH(hw);
609 /* Allow module to change analog characteristics (1G->10G) */
612 status = ixgbe_setup_mac_link_82599(hw,
613 IXGBE_LINK_SPEED_10GB_FULL,
615 autoneg_wait_to_complete);
616 if (status != IXGBE_SUCCESS)
619 /* Flap the tx laser if it has not already been done */
620 ixgbe_flap_tx_laser(hw);
623 * Wait for the controller to acquire link. Per IEEE 802.3ap,
624 * Section 73.10.2, we may have to wait up to 500ms if KR is
625 * attempted. 82599 uses the same timing for 10g SFI.
627 for (i = 0; i < 5; i++) {
628 /* Wait for the link partner to also set speed */
631 /* If we have link, just jump out */
632 status = ixgbe_check_link(hw, &link_speed,
634 if (status != IXGBE_SUCCESS)
642 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
644 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
645 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
647 /* If we already have link at this speed, just jump out */
648 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
649 if (status != IXGBE_SUCCESS)
652 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
655 /* Set the module link speed */
656 esdp_reg &= ~IXGBE_ESDP_SDP5;
657 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
658 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
659 IXGBE_WRITE_FLUSH(hw);
661 /* Allow module to change analog characteristics (10G->1G) */
664 status = ixgbe_setup_mac_link_82599(hw,
665 IXGBE_LINK_SPEED_1GB_FULL,
667 autoneg_wait_to_complete);
668 if (status != IXGBE_SUCCESS)
671 /* Flap the tx laser if it has not already been done */
672 ixgbe_flap_tx_laser(hw);
674 /* Wait for the link partner to also set speed */
677 /* If we have link, just jump out */
678 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
679 if (status != IXGBE_SUCCESS)
687 * We didn't get link. Configure back to the highest speed we tried,
688 * (if there was more than one). We call ourselves back with just the
689 * single highest speed that the user requested.
692 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
693 highest_link_speed, autoneg, autoneg_wait_to_complete);
696 /* Set autoneg_advertised value based on input link speed */
697 hw->phy.autoneg_advertised = 0;
699 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
700 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
702 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
703 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
709 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
710 * @hw: pointer to hardware structure
711 * @speed: new link speed
712 * @autoneg: TRUE if autonegotiation enabled
713 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
715 * Implements the Intel SmartSpeed algorithm.
717 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
718 ixgbe_link_speed speed, bool autoneg,
719 bool autoneg_wait_to_complete)
721 s32 status = IXGBE_SUCCESS;
722 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
724 bool link_up = FALSE;
725 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
727 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
729 /* Set autoneg_advertised value based on input link speed */
730 hw->phy.autoneg_advertised = 0;
732 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
733 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
735 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
736 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
738 if (speed & IXGBE_LINK_SPEED_100_FULL)
739 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
742 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
743 * autoneg advertisement if link is unable to be established at the
744 * highest negotiated rate. This can sometimes happen due to integrity
745 * issues with the physical media connection.
748 /* First, try to get link with full advertisement */
749 hw->phy.smart_speed_active = FALSE;
750 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
751 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
752 autoneg_wait_to_complete);
753 if (status != IXGBE_SUCCESS)
757 * Wait for the controller to acquire link. Per IEEE 802.3ap,
758 * Section 73.10.2, we may have to wait up to 500ms if KR is
759 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
760 * Table 9 in the AN MAS.
762 for (i = 0; i < 5; i++) {
765 /* If we have link, just jump out */
766 status = ixgbe_check_link(hw, &link_speed, &link_up,
768 if (status != IXGBE_SUCCESS)
777 * We didn't get link. If we advertised KR plus one of KX4/KX
778 * (or BX4/BX), then disable KR and try again.
780 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
781 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
784 /* Turn SmartSpeed on to disable KR support */
785 hw->phy.smart_speed_active = TRUE;
786 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
787 autoneg_wait_to_complete);
788 if (status != IXGBE_SUCCESS)
792 * Wait for the controller to acquire link. 600ms will allow for
793 * the AN link_fail_inhibit_timer as well for multiple cycles of
794 * parallel detect, both 10g and 1g. This allows for the maximum
795 * connect attempts as defined in the AN MAS table 73-7.
797 for (i = 0; i < 6; i++) {
800 /* If we have link, just jump out */
801 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
802 if (status != IXGBE_SUCCESS)
809 /* We didn't get link. Turn SmartSpeed back off. */
810 hw->phy.smart_speed_active = FALSE;
811 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
812 autoneg_wait_to_complete);
815 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
816 DEBUGOUT("Smartspeed has downgraded the link speed "
817 "from the maximum advertised\n");
822 * ixgbe_setup_mac_link_82599 - Set MAC link speed
823 * @hw: pointer to hardware structure
824 * @speed: new link speed
825 * @autoneg: TRUE if autonegotiation enabled
826 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
828 * Set the link speed in the AUTOC register and restarts link.
830 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
831 ixgbe_link_speed speed, bool autoneg,
832 bool autoneg_wait_to_complete)
834 s32 status = IXGBE_SUCCESS;
835 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
836 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
837 u32 start_autoc = autoc;
839 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
840 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
841 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
844 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
846 DEBUGFUNC("ixgbe_setup_mac_link_82599");
848 /* Check to see if speed passed in is supported. */
849 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
850 if (status != IXGBE_SUCCESS)
853 speed &= link_capabilities;
855 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
856 status = IXGBE_ERR_LINK_SETUP;
860 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
861 if (hw->mac.orig_link_settings_stored)
862 orig_autoc = hw->mac.orig_autoc;
866 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
867 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
868 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
869 /* Set KX4/KX/KR support according to speed requested */
870 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
871 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
872 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
873 autoc |= IXGBE_AUTOC_KX4_SUPP;
874 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
875 (hw->phy.smart_speed_active == FALSE))
876 autoc |= IXGBE_AUTOC_KR_SUPP;
877 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
878 autoc |= IXGBE_AUTOC_KX_SUPP;
879 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
880 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
881 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
882 /* Switch from 1G SFI to 10G SFI if requested */
883 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
884 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
885 autoc &= ~IXGBE_AUTOC_LMS_MASK;
886 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
888 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
889 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
890 /* Switch from 10G SFI to 1G SFI if requested */
891 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
892 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
893 autoc &= ~IXGBE_AUTOC_LMS_MASK;
895 autoc |= IXGBE_AUTOC_LMS_1G_AN;
897 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
901 if (autoc != start_autoc) {
903 autoc |= IXGBE_AUTOC_AN_RESTART;
904 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
906 /* Only poll for autoneg to complete if specified to do so */
907 if (autoneg_wait_to_complete) {
908 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
909 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
910 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
911 links_reg = 0; /*Just in case Autoneg time=0*/
912 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
914 IXGBE_READ_REG(hw, IXGBE_LINKS);
915 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
919 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
921 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
922 DEBUGOUT("Autoneg did not complete.\n");
927 /* Add delay to filter out noises during initial link setup */
936 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
937 * @hw: pointer to hardware structure
938 * @speed: new link speed
939 * @autoneg: TRUE if autonegotiation enabled
940 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
942 * Restarts link on PHY and MAC based on settings passed in.
944 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
945 ixgbe_link_speed speed,
947 bool autoneg_wait_to_complete)
951 DEBUGFUNC("ixgbe_setup_copper_link_82599");
953 /* Setup the PHY according to input speed */
954 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
955 autoneg_wait_to_complete);
957 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
963 * ixgbe_reset_hw_82599 - Perform hardware reset
964 * @hw: pointer to hardware structure
966 * Resets the hardware by resetting the transmit and receive units, masks
967 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
970 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
972 ixgbe_link_speed link_speed;
974 u32 ctrl, i, autoc, autoc2;
975 bool link_up = FALSE;
977 DEBUGFUNC("ixgbe_reset_hw_82599");
979 /* Call adapter stop to disable tx/rx and clear interrupts */
980 status = hw->mac.ops.stop_adapter(hw);
981 if (status != IXGBE_SUCCESS)
984 /* flush pending Tx transactions */
985 ixgbe_clear_tx_pending(hw);
987 /* PHY ops must be identified and initialized prior to reset */
989 /* Identify PHY and related function pointers */
990 status = hw->phy.ops.init(hw);
992 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
995 /* Setup SFP module if there is one present. */
996 if (hw->phy.sfp_setup_needed) {
997 status = hw->mac.ops.setup_sfp(hw);
998 hw->phy.sfp_setup_needed = FALSE;
1001 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1005 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1006 hw->phy.ops.reset(hw);
1010 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1011 * If link reset is used when link is up, it might reset the PHY when
1012 * mng is using it. If link is down or the flag to force full link
1013 * reset is set, then perform link reset.
1015 ctrl = IXGBE_CTRL_LNK_RST;
1016 if (!hw->force_full_reset) {
1017 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1019 ctrl = IXGBE_CTRL_RST;
1022 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1023 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1024 IXGBE_WRITE_FLUSH(hw);
1026 /* Poll for reset bit to self-clear indicating reset is complete */
1027 for (i = 0; i < 10; i++) {
1029 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1030 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1034 if (ctrl & IXGBE_CTRL_RST_MASK) {
1035 status = IXGBE_ERR_RESET_FAILED;
1036 DEBUGOUT("Reset polling failed to complete.\n");
1042 * Double resets are required for recovery from certain error
1043 * conditions. Between resets, it is necessary to stall to allow time
1044 * for any pending HW events to complete.
1046 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1047 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1052 * Store the original AUTOC/AUTOC2 values if they have not been
1053 * stored off yet. Otherwise restore the stored original
1054 * values since the reset operation sets back to defaults.
1056 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1057 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1058 if (hw->mac.orig_link_settings_stored == FALSE) {
1059 hw->mac.orig_autoc = autoc;
1060 hw->mac.orig_autoc2 = autoc2;
1061 hw->mac.orig_link_settings_stored = TRUE;
1063 if (autoc != hw->mac.orig_autoc)
1064 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
1065 IXGBE_AUTOC_AN_RESTART));
1067 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1068 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1069 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1070 autoc2 |= (hw->mac.orig_autoc2 &
1071 IXGBE_AUTOC2_UPPER_MASK);
1072 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1076 /* Store the permanent mac address */
1077 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1080 * Store MAC address from RAR0, clear receive address registers, and
1081 * clear the multicast table. Also reset num_rar_entries to 128,
1082 * since we modify this value when programming the SAN MAC address.
1084 hw->mac.num_rar_entries = 128;
1085 hw->mac.ops.init_rx_addrs(hw);
1087 /* Store the permanent SAN mac address */
1088 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1090 /* Add the SAN MAC address to the RAR only if it's a valid address */
1091 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1092 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1093 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1095 /* Save the SAN MAC RAR index */
1096 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1098 /* Reserve the last RAR for the SAN MAC address */
1099 hw->mac.num_rar_entries--;
1102 /* Store the alternative WWNN/WWPN prefix */
1103 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1104 &hw->mac.wwpn_prefix);
1111 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1112 * @hw: pointer to hardware structure
1114 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1117 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1118 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1120 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1123 * Before starting reinitialization process,
1124 * FDIRCMD.CMD must be zero.
1126 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1127 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1128 IXGBE_FDIRCMD_CMD_MASK))
1132 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1133 DEBUGOUT("Flow Director previous command isn't complete, "
1134 "aborting table re-initialization.\n");
1135 return IXGBE_ERR_FDIR_REINIT_FAILED;
1138 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1139 IXGBE_WRITE_FLUSH(hw);
1141 * 82599 adapters flow director init flow cannot be restarted,
1142 * Workaround 82599 silicon errata by performing the following steps
1143 * before re-writing the FDIRCTRL control register with the same value.
1144 * - write 1 to bit 8 of FDIRCMD register &
1145 * - write 0 to bit 8 of FDIRCMD register
1147 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1148 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1149 IXGBE_FDIRCMD_CLEARHT));
1150 IXGBE_WRITE_FLUSH(hw);
1151 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1152 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1153 ~IXGBE_FDIRCMD_CLEARHT));
1154 IXGBE_WRITE_FLUSH(hw);
1156 * Clear FDIR Hash register to clear any leftover hashes
1157 * waiting to be programmed.
1159 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1160 IXGBE_WRITE_FLUSH(hw);
1162 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1163 IXGBE_WRITE_FLUSH(hw);
1165 /* Poll init-done after we write FDIRCTRL register */
1166 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1167 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1168 IXGBE_FDIRCTRL_INIT_DONE)
1172 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1173 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1174 return IXGBE_ERR_FDIR_REINIT_FAILED;
1177 /* Clear FDIR statistics registers (read to clear) */
1178 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1179 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1180 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1181 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1182 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1184 return IXGBE_SUCCESS;
1188 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1189 * @hw: pointer to hardware structure
1190 * @fdirctrl: value to write to flow director control register
1192 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1196 DEBUGFUNC("ixgbe_fdir_enable_82599");
1198 /* Prime the keys for hashing */
1199 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1200 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1203 * Poll init-done after we write the register. Estimated times:
1204 * 10G: PBALLOC = 11b, timing is 60us
1205 * 1G: PBALLOC = 11b, timing is 600us
1206 * 100M: PBALLOC = 11b, timing is 6ms
1208 * Multiple these timings by 4 if under full Rx load
1210 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1211 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1212 * this might not finish in our poll time, but we can live with that
1215 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1216 IXGBE_WRITE_FLUSH(hw);
1217 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1218 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1219 IXGBE_FDIRCTRL_INIT_DONE)
1224 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1225 DEBUGOUT("Flow Director poll time exceeded!\n");
1229 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1230 * @hw: pointer to hardware structure
1231 * @fdirctrl: value to write to flow director control register, initially
1232 * contains just the value of the Rx packet buffer allocation
1234 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1236 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1239 * Continue setup of fdirctrl register bits:
1240 * Move the flexible bytes to use the ethertype - shift 6 words
1241 * Set the maximum length per hash bucket to 0xA filters
1242 * Send interrupt when 64 filters are left
1244 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1245 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1246 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1248 /* write hashes and fdirctrl register, poll for completion */
1249 ixgbe_fdir_enable_82599(hw, fdirctrl);
1251 return IXGBE_SUCCESS;
1255 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1256 * @hw: pointer to hardware structure
1257 * @fdirctrl: value to write to flow director control register, initially
1258 * contains just the value of the Rx packet buffer allocation
1260 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1262 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1265 * Continue setup of fdirctrl register bits:
1266 * Turn perfect match filtering on
1267 * Report hash in RSS field of Rx wb descriptor
1268 * Initialize the drop queue
1269 * Move the flexible bytes to use the ethertype - shift 6 words
1270 * Set the maximum length per hash bucket to 0xA filters
1271 * Send interrupt when 64 (0x4 * 16) filters are left
1273 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1274 IXGBE_FDIRCTRL_REPORT_STATUS |
1275 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1276 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1277 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1278 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1280 /* write hashes and fdirctrl register, poll for completion */
1281 ixgbe_fdir_enable_82599(hw, fdirctrl);
1283 return IXGBE_SUCCESS;
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15.
 *
 * The macro expects lo_hash_dword, hi_hash_dword, common_hash, bucket_hash
 * and sig_hash to be in scope at the expansion site.  Wrapped in
 * do { } while (0) so a single expansion behaves as one statement.
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
1311 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1312 * @stream: input bitstream to compute the hash on
1314 * This function is almost identical to the function above but contains
1315 * several optomizations such as unwinding all of the loops, letting the
1316 * compiler work out all of the conditional ifs since the keys are static
1317 * defines, and computing two keys at once since the hashed dword stream
1318 * will be the same for both keys.
1320 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1321 union ixgbe_atr_hash_dword common)
1323 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1324 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1326 /* record the flow_vm_vlan bits as they are a key part to the hash */
1327 flow_vm_vlan = IXGBE_NTOHL(input.dword);
1329 /* generate common hash dword */
1330 hi_hash_dword = IXGBE_NTOHL(common.dword);
1332 /* low dword is word swapped version of common */
1333 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1335 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1336 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1338 /* Process bits 0 and 16 */
1339 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1342 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1343 * delay this because bit 0 of the stream should not be processed
1344 * so we do not add the vlan until after bit 0 was processed
1346 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1348 /* Process remaining 30 bit of the key */
1349 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1350 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1351 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1352 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1353 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1354 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1355 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1356 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1357 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1358 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1359 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1360 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1361 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1362 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1363 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1365 /* combine common_hash result with signature and bucket hashes */
1366 bucket_hash ^= common_hash;
1367 bucket_hash &= IXGBE_ATR_HASH_MASK;
1369 sig_hash ^= common_hash << 16;
1370 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1372 /* return completed signature hash */
1373 return sig_hash ^ bucket_hash;
1377 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1378 * @hw: pointer to hardware structure
1379 * @input: unique input dword
1380 * @common: compressed common input dword
1381 * @queue: queue index to direct traffic to
1383 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1384 union ixgbe_atr_hash_dword input,
1385 union ixgbe_atr_hash_dword common,
1391 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1394 * Get the flow_type in order to program FDIRCMD properly
1395 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1397 switch (input.formatted.flow_type) {
1398 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1399 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1400 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1401 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1402 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1403 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1406 DEBUGOUT(" Error on flow type input\n");
1407 return IXGBE_ERR_CONFIG;
1410 /* configure FDIRCMD register */
1411 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1412 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1413 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1414 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1417 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1418 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1420 fdirhashcmd = (u64)fdircmd << 32;
1421 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1422 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1424 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1426 return IXGBE_SUCCESS;
/*
 * Single iteration of the bucket-hash computation; called for n = 0..15.
 * Expects lo_hash_dword, hi_hash_dword and bucket_hash in scope at the
 * expansion site.  Wrapped in do { } while (0) so a single expansion
 * behaves as one statement.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
1439 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1440 * @atr_input: input bitstream to compute the hash on
1441 * @input_mask: mask for the input bitstream
1443 * This function serves two main purposes. First it applys the input_mask
1444 * to the atr_input resulting in a cleaned up atr_input data stream.
1445 * Secondly it computes the hash and stores it in the bkt_hash field at
1446 * the end of the input byte stream. This way it will be available for
1447 * future use without needing to recompute the hash.
1449 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1450 union ixgbe_atr_input *input_mask)
1453 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1454 u32 bucket_hash = 0;
1456 /* Apply masks to input data */
1457 input->dword_stream[0] &= input_mask->dword_stream[0];
1458 input->dword_stream[1] &= input_mask->dword_stream[1];
1459 input->dword_stream[2] &= input_mask->dword_stream[2];
1460 input->dword_stream[3] &= input_mask->dword_stream[3];
1461 input->dword_stream[4] &= input_mask->dword_stream[4];
1462 input->dword_stream[5] &= input_mask->dword_stream[5];
1463 input->dword_stream[6] &= input_mask->dword_stream[6];
1464 input->dword_stream[7] &= input_mask->dword_stream[7];
1465 input->dword_stream[8] &= input_mask->dword_stream[8];
1466 input->dword_stream[9] &= input_mask->dword_stream[9];
1467 input->dword_stream[10] &= input_mask->dword_stream[10];
1469 /* record the flow_vm_vlan bits as they are a key part to the hash */
1470 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1472 /* generate common hash dword */
1473 hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1474 input->dword_stream[2] ^
1475 input->dword_stream[3] ^
1476 input->dword_stream[4] ^
1477 input->dword_stream[5] ^
1478 input->dword_stream[6] ^
1479 input->dword_stream[7] ^
1480 input->dword_stream[8] ^
1481 input->dword_stream[9] ^
1482 input->dword_stream[10]);
1484 /* low dword is word swapped version of common */
1485 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1487 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1488 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1490 /* Process bits 0 and 16 */
1491 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1494 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1495 * delay this because bit 0 of the stream should not be processed
1496 * so we do not add the vlan until after bit 0 was processed
1498 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1500 /* Process remaining 30 bit of the key */
1501 IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1502 IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1503 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1504 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1505 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1506 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1507 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1508 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1509 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1510 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1511 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1512 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1513 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1514 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1515 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1518 * Limit hash to 13 bits since max bucket count is 8K.
1519 * Store result at the end of the input stream.
1521 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1525 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1526 * @input_mask: mask to be bit swapped
1528 * The source and destination port masks for flow director are bit swapped
1529 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1530 * generate a correctly swapped value we need to bit swap the mask and that
1531 * is what is accomplished by this function.
1533 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1535 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1536 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1537 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1538 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1539 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1540 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1541 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1561 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1562 union ixgbe_atr_input *input_mask)
1564 /* mask IPv6 since it is currently not supported */
1565 u32 fdirm = IXGBE_FDIRM_DIPv6;
1568 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1571 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1572 * are zero, then assume a full mask for that field. Also assume that
1573 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1574 * cannot be masked out in this implementation.
1576 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1580 /* verify bucket hash is cleared on hash generation */
1581 if (input_mask->formatted.bkt_hash)
1582 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1584 /* Program FDIRM and verify partial masks */
1585 switch (input_mask->formatted.vm_pool & 0x7F) {
1587 fdirm |= IXGBE_FDIRM_POOL;
1591 DEBUGOUT(" Error on vm pool mask\n");
1592 return IXGBE_ERR_CONFIG;
1595 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1597 fdirm |= IXGBE_FDIRM_L4P;
1598 if (input_mask->formatted.dst_port ||
1599 input_mask->formatted.src_port) {
1600 DEBUGOUT(" Error on src/dst port mask\n");
1601 return IXGBE_ERR_CONFIG;
1603 case IXGBE_ATR_L4TYPE_MASK:
1606 DEBUGOUT(" Error on flow type mask\n");
1607 return IXGBE_ERR_CONFIG;
1610 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1612 /* mask VLAN ID, fall through to mask VLAN priority */
1613 fdirm |= IXGBE_FDIRM_VLANID;
1615 /* mask VLAN priority */
1616 fdirm |= IXGBE_FDIRM_VLANP;
1619 /* mask VLAN ID only, fall through */
1620 fdirm |= IXGBE_FDIRM_VLANID;
1622 /* no VLAN fields masked */
1625 DEBUGOUT(" Error on VLAN mask\n");
1626 return IXGBE_ERR_CONFIG;
1629 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1631 /* Mask Flex Bytes, fall through */
1632 fdirm |= IXGBE_FDIRM_FLEX;
1636 DEBUGOUT(" Error on flexible byte mask\n");
1637 return IXGBE_ERR_CONFIG;
1640 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1641 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1643 /* store the TCP/UDP port masks, bit reversed from port layout */
1644 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1646 /* write both the same so that UDP and TCP use the same mask */
1647 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1648 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1650 /* store source and destination IP masks (big-enian) */
1651 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1652 ~input_mask->formatted.src_ip[0]);
1653 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1654 ~input_mask->formatted.dst_ip[0]);
1656 return IXGBE_SUCCESS;
1659 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1660 union ixgbe_atr_input *input,
1661 u16 soft_id, u8 queue)
1663 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1665 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1667 /* currently IPv6 is not supported, must be programmed with 0 */
1668 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1669 input->formatted.src_ip[0]);
1670 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1671 input->formatted.src_ip[1]);
1672 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1673 input->formatted.src_ip[2]);
1675 /* record the source address (big-endian) */
1676 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1678 /* record the first 32 bits of the destination address (big-endian) */
1679 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1681 /* record source and destination port (little-endian)*/
1682 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1683 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1684 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1685 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1687 /* record vlan (little-endian) and flex_bytes(big-endian) */
1688 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1689 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1690 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1691 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1693 /* configure FDIRHASH register */
1694 fdirhash = input->formatted.bkt_hash;
1695 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1696 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1699 * flush all previous writes to make certain registers are
1700 * programmed prior to issuing the command
1702 IXGBE_WRITE_FLUSH(hw);
1704 /* configure FDIRCMD register */
1705 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1706 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1707 if (queue == IXGBE_FDIR_DROP_QUEUE)
1708 fdircmd |= IXGBE_FDIRCMD_DROP;
1709 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1710 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1711 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1713 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1715 return IXGBE_SUCCESS;
1718 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1719 union ixgbe_atr_input *input,
1725 s32 err = IXGBE_SUCCESS;
1727 /* configure FDIRHASH register */
1728 fdirhash = input->formatted.bkt_hash;
1729 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1730 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1732 /* flush hash to HW */
1733 IXGBE_WRITE_FLUSH(hw);
1735 /* Query if filter is present */
1736 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1738 for (retry_count = 10; retry_count; retry_count--) {
1739 /* allow 10us for query to process */
1741 /* verify query completed successfully */
1742 fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1743 if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1748 err = IXGBE_ERR_FDIR_REINIT_FAILED;
1750 /* if filter exists in hardware then remove it */
1751 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1752 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1753 IXGBE_WRITE_FLUSH(hw);
1754 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1755 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1762 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1763 * @hw: pointer to hardware structure
1764 * @input: input bitstream
1765 * @input_mask: mask for the input bitstream
1766 * @soft_id: software index for the filters
1767 * @queue: queue index to direct traffic to
1769 * Note that the caller to this function must lock before calling, since the
1770 * hardware writes must be protected from one another.
1772 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1773 union ixgbe_atr_input *input,
1774 union ixgbe_atr_input *input_mask,
1775 u16 soft_id, u8 queue)
1777 s32 err = IXGBE_ERR_CONFIG;
1779 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1782 * Check flow_type formatting, and bail out before we touch the hardware
1783 * if there's a configuration issue
1785 switch (input->formatted.flow_type) {
1786 case IXGBE_ATR_FLOW_TYPE_IPV4:
1787 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1788 if (input->formatted.dst_port || input->formatted.src_port) {
1789 DEBUGOUT(" Error on src/dst port\n");
1790 return IXGBE_ERR_CONFIG;
1793 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1794 if (input->formatted.dst_port || input->formatted.src_port) {
1795 DEBUGOUT(" Error on src/dst port\n");
1796 return IXGBE_ERR_CONFIG;
1798 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1799 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1800 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1801 IXGBE_ATR_L4TYPE_MASK;
1804 DEBUGOUT(" Error on flow type input\n");
1808 /* program input mask into the HW */
1809 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1813 /* apply mask and compute/store hash */
1814 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1816 /* program filters to filter memory */
1817 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
1822 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1823 * @hw: pointer to hardware structure
1824 * @reg: analog register to read
1827 * Performs read operation to Omer analog register specified.
1829 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1833 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
1835 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1837 IXGBE_WRITE_FLUSH(hw);
1839 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1840 *val = (u8)core_ctl;
1842 return IXGBE_SUCCESS;
1846 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
1847 * @hw: pointer to hardware structure
1848 * @reg: atlas register to write
1849 * @val: value to write
1851 * Performs write operation to Omer analog register specified.
1853 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1857 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
1859 core_ctl = (reg << 8) | val;
1860 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
1861 IXGBE_WRITE_FLUSH(hw);
1864 return IXGBE_SUCCESS;
1868 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1869 * @hw: pointer to hardware structure
1871 * Starts the hardware using the generic start_hw function
1872 * and the generation start_hw function.
1873 * Then performs revision-specific operations, if any.
1875 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1877 s32 ret_val = IXGBE_SUCCESS;
1879 DEBUGFUNC("ixgbe_start_hw_82599");
1881 ret_val = ixgbe_start_hw_generic(hw);
1882 if (ret_val != IXGBE_SUCCESS)
1885 ret_val = ixgbe_start_hw_gen2(hw);
1886 if (ret_val != IXGBE_SUCCESS)
1889 /* We need to run link autotry after the driver loads */
1890 hw->mac.autotry_restart = TRUE;
1892 if (ret_val == IXGBE_SUCCESS)
1893 ret_val = ixgbe_verify_fw_version_82599(hw);
1899 * ixgbe_identify_phy_82599 - Get physical layer module
1900 * @hw: pointer to hardware structure
1902 * Determines the physical layer module found on the current adapter.
1903 * If PHY already detected, maintains current PHY type in hw struct,
1904 * otherwise executes the PHY detection routine.
1906 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1908 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1910 DEBUGFUNC("ixgbe_identify_phy_82599");
1912 /* Detect PHY if not unknown - returns success if already detected. */
1913 status = ixgbe_identify_phy_generic(hw);
1914 if (status != IXGBE_SUCCESS) {
1915 /* 82599 10GBASE-T requires an external PHY */
1916 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1919 status = ixgbe_identify_module_generic(hw);
1922 /* Set PHY type none if no PHY detected */
1923 if (hw->phy.type == ixgbe_phy_unknown) {
1924 hw->phy.type = ixgbe_phy_none;
1925 status = IXGBE_SUCCESS;
1928 /* Return error if SFP module has been detected but is not supported */
1929 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1930 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1937 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
1938 * @hw: pointer to hardware structure
1940 * Determines physical layer capabilities of the current configuration.
1942 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1944 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1945 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1946 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1947 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1948 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1949 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1950 u16 ext_ability = 0;
1951 u8 comp_codes_10g = 0;
1952 u8 comp_codes_1g = 0;
1954 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
1956 hw->phy.ops.identify(hw);
1958 switch (hw->phy.type) {
1960 case ixgbe_phy_cu_unknown:
1961 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1962 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1963 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1964 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1965 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1966 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1967 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1968 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1974 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1975 case IXGBE_AUTOC_LMS_1G_AN:
1976 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1977 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
1978 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
1979 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1982 /* SFI mode so read SFP module */
1985 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1986 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
1987 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1988 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
1989 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1990 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
1991 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
1994 case IXGBE_AUTOC_LMS_10G_SERIAL:
1995 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
1996 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
1998 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2001 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2002 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2003 if (autoc & IXGBE_AUTOC_KX_SUPP)
2004 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2005 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2006 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2007 if (autoc & IXGBE_AUTOC_KR_SUPP)
2008 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2017 /* SFP check must be done last since DA modules are sometimes used to
2018 * test KR mode - we need to id KR mode correctly before SFP module.
2019 * Call identify_sfp because the pluggable module may have changed */
2020 hw->phy.ops.identify_sfp(hw);
2021 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2024 switch (hw->phy.type) {
2025 case ixgbe_phy_sfp_passive_tyco:
2026 case ixgbe_phy_sfp_passive_unknown:
2027 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2029 case ixgbe_phy_sfp_ftl_active:
2030 case ixgbe_phy_sfp_active_unknown:
2031 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2033 case ixgbe_phy_sfp_avago:
2034 case ixgbe_phy_sfp_ftl:
2035 case ixgbe_phy_sfp_intel:
2036 case ixgbe_phy_sfp_unknown:
2037 hw->phy.ops.read_i2c_eeprom(hw,
2038 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2039 hw->phy.ops.read_i2c_eeprom(hw,
2040 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2041 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2042 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2043 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2044 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2045 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2046 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2047 else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2048 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2055 return physical_layer;
2059 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2060 * @hw: pointer to hardware structure
2061 * @regval: register value to write to RXCTRL
2063 * Enables the Rx DMA unit for 82599
2065 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2068 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2071 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2072 * If traffic is incoming before we enable the Rx unit, it could hang
2073 * the Rx DMA unit. Therefore, make sure the security engine is
2074 * completely disabled prior to enabling the Rx unit.
2077 hw->mac.ops.disable_sec_rx_path(hw);
2079 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2081 hw->mac.ops.enable_sec_rx_path(hw);
2083 return IXGBE_SUCCESS;
2087 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2088 * @hw: pointer to hardware structure
2090 * Verifies that installed the firmware version is 0.6 or higher
2091 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2093 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2094 * if the FW version is not supported.
2096 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2098 s32 status = IXGBE_ERR_EEPROM_VERSION;
2099 u16 fw_offset, fw_ptp_cfg_offset;
2102 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2104 /* firmware check is only necessary for SFI devices */
2105 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2106 status = IXGBE_SUCCESS;
2107 goto fw_version_out;
2110 /* get the offset to the Firmware Module block */
2111 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2113 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2114 goto fw_version_out;
2116 /* get the offset to the Pass Through Patch Configuration block */
2117 hw->eeprom.ops.read(hw, (fw_offset +
2118 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2119 &fw_ptp_cfg_offset);
2121 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2122 goto fw_version_out;
2124 /* get the firmware version */
2125 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2126 IXGBE_FW_PATCH_VERSION_4), &fw_version);
2128 if (fw_version > 0x5)
2129 status = IXGBE_SUCCESS;
2136 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2137 * @hw: pointer to hardware structure
2139 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2140 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2142 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2144 bool lesm_enabled = FALSE;
2145 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2148 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2150 /* get the offset to the Firmware Module block */
2151 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2153 if ((status != IXGBE_SUCCESS) ||
2154 (fw_offset == 0) || (fw_offset == 0xFFFF))
2157 /* get the offset to the LESM Parameters block */
2158 status = hw->eeprom.ops.read(hw, (fw_offset +
2159 IXGBE_FW_LESM_PARAMETERS_PTR),
2160 &fw_lesm_param_offset);
2162 if ((status != IXGBE_SUCCESS) ||
2163 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2166 /* get the lesm state word */
2167 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2168 IXGBE_FW_LESM_STATE_1),
2171 if ((status == IXGBE_SUCCESS) &&
2172 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2173 lesm_enabled = TRUE;
2176 return lesm_enabled;
2180 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2181 * fastest available method
2183 * @hw: pointer to hardware structure
2184 * @offset: offset of word in EEPROM to read
2185 * @words: number of words
2186 * @data: word(s) read from the EEPROM
2188 * Retrieves 16 bit word(s) read from EEPROM
2190 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2191 u16 words, u16 *data)
2193 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2194 s32 ret_val = IXGBE_ERR_CONFIG;
2196 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2199 * If EEPROM is detected and can be addressed using 14 bits,
2200 * use EERD otherwise use bit bang
2202 if ((eeprom->type == ixgbe_eeprom_spi) &&
2203 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2204 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2207 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2215 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2216 * fastest available method
2218 * @hw: pointer to hardware structure
2219 * @offset: offset of word in the EEPROM to read
2220 * @data: word read from the EEPROM
2222 * Reads a 16 bit word from the EEPROM
2224 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2225 u16 offset, u16 *data)
2227 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2228 s32 ret_val = IXGBE_ERR_CONFIG;
2230 DEBUGFUNC("ixgbe_read_eeprom_82599");
2233 * If EEPROM is detected and can be addressed using 14 bits,
2234 * use EERD otherwise use bit bang
2236 if ((eeprom->type == ixgbe_eeprom_spi) &&
2237 (offset <= IXGBE_EERD_MAX_ADDR))
2238 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2240 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);