1 /******************************************************************************
3 Copyright (c) 2001-2013, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixgbe_common.h"
36 #include "ixgbe_phy.h"
37 #include "ixgbe_dcb.h"
38 #include "ixgbe_dcb_82599.h"
39 #include "ixgbe_api.h"
41 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
42 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
43 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
44 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
45 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
46 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
48 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
49 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
51 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
53 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
54 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
56 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 u16 words, u16 *data);
58 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data);
60 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
64 * ixgbe_init_ops_generic - Inits function ptrs
65 * @hw: pointer to the hardware structure
67 * Initialize the function pointers.
69 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
/* Installs the generic (family-independent) EEPROM and MAC function
 * pointers.  Entries left NULL must be overridden by the device-specific
 * init (82598/82599/X540) before they are called.
 * NOTE(review): this copy of the file has lines elided (braces and the
 * "else" arm are missing); comments only added here, code untouched. */
71 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
72 struct ixgbe_mac_info *mac = &hw->mac;
73 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
75 DEBUGFUNC("ixgbe_init_ops_generic");
/* EEPROM ops */
78 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
79 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
80 if (eec & IXGBE_EEC_PRES) {
81 eeprom->ops.read = &ixgbe_read_eerd_generic;
82 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
/* (elided "else" arm: fall back to bit-banged EEPROM access) */
84 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
85 eeprom->ops.read_buffer =
86 &ixgbe_read_eeprom_buffer_bit_bang_generic;
88 eeprom->ops.write = &ixgbe_write_eeprom_generic;
89 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
90 eeprom->ops.validate_checksum =
91 &ixgbe_validate_eeprom_checksum_generic;
92 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
93 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
/* MAC ops */
96 mac->ops.init_hw = &ixgbe_init_hw_generic;
97 mac->ops.reset_hw = NULL;
98 mac->ops.start_hw = &ixgbe_start_hw_generic;
99 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
100 mac->ops.get_media_type = NULL;
101 mac->ops.get_supported_physical_layer = NULL;
102 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
103 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
104 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
105 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
106 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
107 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
108 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
/* LED ops */
111 mac->ops.led_on = &ixgbe_led_on_generic;
112 mac->ops.led_off = &ixgbe_led_off_generic;
113 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
114 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
116 /* RAR, Multicast, VLAN */
117 mac->ops.set_rar = &ixgbe_set_rar_generic;
118 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
119 mac->ops.insert_mac_addr = NULL;
120 mac->ops.set_vmdq = NULL;
121 mac->ops.clear_vmdq = NULL;
122 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
123 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
124 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
125 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
126 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
127 mac->ops.clear_vfta = NULL;
128 mac->ops.set_vfta = NULL;
129 mac->ops.set_vlvf = NULL;
130 mac->ops.init_uta_tables = NULL;
/* Flow control */
133 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
/* Link handling is entirely device-family specific */
136 mac->ops.get_link_capabilities = NULL;
137 mac->ops.setup_link = NULL;
138 mac->ops.check_link = NULL;
139 mac->ops.dmac_config = NULL;
140 mac->ops.dmac_update_tcs = NULL;
141 mac->ops.dmac_config_tcs = NULL;
143 return IXGBE_SUCCESS;
147 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
149 * @hw: pointer to hardware structure
151 * This function returns TRUE if the device supports flow control
152 * autonegotiation, and FALSE if it does not.
155 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
/* Returns TRUE when the device/media combination can autonegotiate flow
 * control.  NOTE(review): several lines are elided in this copy (the
 * link_up declaration, "break"s, and the final return are missing). */
157 bool supported = FALSE;
158 ixgbe_link_speed speed;
161 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
163 switch (hw->phy.media_type) {
164 case ixgbe_media_type_fiber_fixed:
165 case ixgbe_media_type_fiber:
/* Fiber: FC autoneg only applies at 1G; query current link state */
166 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
167 /* if link is down, assume supported */
169 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
174 case ixgbe_media_type_backplane:
177 case ixgbe_media_type_copper:
178 /* only some copper devices support flow control autoneg */
179 switch (hw->device_id) {
180 case IXGBE_DEV_ID_82599_T3_LOM:
181 case IXGBE_DEV_ID_X540T:
182 case IXGBE_DEV_ID_X540_BYPASS:
/* Unsupported combination: log it (result stays FALSE) */
192 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
193 "Device %x does not support flow control autoneg",
199 * ixgbe_setup_fc - Set up flow control
200 * @hw: pointer to hardware structure
202 * Called at init time to set up flow control.
204 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
/* Programs the 1G (PCS1GANA), 10G backplane (AUTOC) and copper (PHY MDIO)
 * flow-control advertisement registers according to hw->fc.requested_mode,
 * then restarts autonegotiation where applicable.
 * NOTE(review): braces/"break"s and the reg_cu/link declarations are
 * elided in this copy; comments only added, code untouched. */
206 s32 ret_val = IXGBE_SUCCESS;
207 u32 reg = 0, reg_bp = 0;
209 bool got_lock = FALSE;
211 DEBUGFUNC("ixgbe_setup_fc");
214 * Validate the requested mode. Strict IEEE mode does not allow
215 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
217 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
218 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
219 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
220 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
225 * 10gig parts do not have a word in the EEPROM to determine the
226 * default flow control setting, so we explicitly set it to full.
228 if (hw->fc.requested_mode == ixgbe_fc_default)
229 hw->fc.requested_mode = ixgbe_fc_full;
232 * Set up the 1G and 10G flow control advertisement registers so the
233 * HW will be able to do fc autoneg once the cable is plugged in. If
234 * we link at 10G, the 1G advertisement is harmless and vice versa.
236 switch (hw->phy.media_type) {
237 case ixgbe_media_type_fiber_fixed:
238 case ixgbe_media_type_fiber:
239 case ixgbe_media_type_backplane:
240 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
241 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
243 case ixgbe_media_type_copper:
/* Copper: current advertisement lives in the PHY, read via MDIO */
244 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
245 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
252 * The possible values of fc.requested_mode are:
253 * 0: Flow control is completely disabled
254 * 1: Rx flow control is enabled (we can receive pause frames,
255 * but not send pause frames).
256 * 2: Tx flow control is enabled (we can send pause frames but
257 * we do not support receiving pause frames).
258 * 3: Both Rx and Tx flow control (symmetric) are enabled.
261 switch (hw->fc.requested_mode) {
263 /* Flow control completely disabled by software override. */
264 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
265 if (hw->phy.media_type == ixgbe_media_type_backplane)
266 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
267 IXGBE_AUTOC_ASM_PAUSE);
268 else if (hw->phy.media_type == ixgbe_media_type_copper)
269 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
271 case ixgbe_fc_tx_pause:
273 * Tx Flow control is enabled, and Rx Flow control is
274 * disabled by software override.
276 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
277 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
278 if (hw->phy.media_type == ixgbe_media_type_backplane) {
279 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
280 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
281 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
282 reg_cu |= IXGBE_TAF_ASM_PAUSE;
283 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
286 case ixgbe_fc_rx_pause:
288 * Rx Flow control is enabled and Tx Flow control is
289 * disabled by software override. Since there really
290 * isn't a way to advertise that we are capable of RX
291 * Pause ONLY, we will advertise that we support both
292 * symmetric and asymmetric Rx PAUSE, as such we fall
293 * through to the fc_full statement. Later, we will
294 * disable the adapter's ability to send PAUSE frames.
297 /* Flow control (both Rx and Tx) is enabled by SW override. */
298 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
299 if (hw->phy.media_type == ixgbe_media_type_backplane)
300 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
301 IXGBE_AUTOC_ASM_PAUSE;
302 else if (hw->phy.media_type == ixgbe_media_type_copper)
303 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
306 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
307 "Flow control param set incorrectly\n");
308 ret_val = IXGBE_ERR_CONFIG;
/* X540 handles clause-37 advertisement in firmware; skip PCS writes */
313 if (hw->mac.type != ixgbe_mac_X540) {
315 * Enable auto-negotiation between the MAC & PHY;
316 * the MAC will advertise clause 37 flow control.
318 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
319 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
321 /* Disable AN timeout */
322 if (hw->fc.strict_ieee)
323 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
325 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
326 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
330 * AUTOC restart handles negotiation of 1G and 10G on backplane
331 * and copper. There is no need to set the PCS1GCTL register.
334 if (hw->phy.media_type == ixgbe_media_type_backplane) {
335 reg_bp |= IXGBE_AUTOC_AN_RESTART;
336 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
337 * LESM is on, likewise reset_pipeline requries the lock as
338 * it also writes AUTOC.
340 if ((hw->mac.type == ixgbe_mac_82599EB) &&
341 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
342 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
343 IXGBE_GSSR_MAC_CSR_SM);
344 if (ret_val != IXGBE_SUCCESS) {
345 ret_val = IXGBE_ERR_SWFW_SYNC;
/* (elided: got_lock bookkeeping before the AUTOC write) */
351 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
352 if (hw->mac.type == ixgbe_mac_82599EB)
353 ixgbe_reset_pipeline_82599(hw);
/* (elided: "if (got_lock)" guard around the release below) */
356 hw->mac.ops.release_swfw_sync(hw,
357 IXGBE_GSSR_MAC_CSR_SM);
358 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
359 (ixgbe_device_supports_autoneg_fc(hw))) {
360 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
361 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
364 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
370 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
371 * @hw: pointer to hardware structure
373 * Starts the hardware by filling the bus info structure and media type, clears
374 * all on chip counters, initializes receive address registers, multicast
375 * table, VLAN filter table, calls routine to set up link and flow control
376 * settings, and leaves transmit and receive units disabled and uninitialized
378 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Common start sequence: set media type, clear VFTA and stat counters,
 * disable no-snoop, set up flow control, clear the stopped flag.
 * NOTE(review): local declarations and error-path lines are elided. */
383 DEBUGFUNC("ixgbe_start_hw_generic");
385 /* Set the media type */
386 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
388 /* PHY ops initialization must be done in reset_hw() */
390 /* Clear the VLAN filter table */
391 hw->mac.ops.clear_vfta(hw);
393 /* Clear statistics registers */
394 hw->mac.ops.clear_hw_cntrs(hw);
396 /* Set No Snoop Disable */
397 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
398 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
399 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
400 IXGBE_WRITE_FLUSH(hw);
402 /* Setup flow control */
403 ret_val = ixgbe_setup_fc(hw);
404 if (ret_val != IXGBE_SUCCESS)
/* (elided: early return/goto on setup_fc failure) */
407 /* Clear adapter stopped flag */
408 hw->adapter_stopped = FALSE;
415 * ixgbe_start_hw_gen2 - Init sequence for common device family
416 * @hw: pointer to hw structure
418 * Performs the init sequence common to the second generation
420 * Devices in the second generation:
424 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
/* Gen-2 (82599/X540) extra start steps: zero the per-queue Tx rate
 * limiters and disable relaxed ordering on every Tx/Rx queue.
 * NOTE(review): the declarations of i/regval are elided in this copy. */
429 /* Clear the rate limiters */
430 for (i = 0; i < hw->mac.max_tx_queues; i++) {
431 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
432 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
434 IXGBE_WRITE_FLUSH(hw);
436 /* Disable relaxed ordering */
437 for (i = 0; i < hw->mac.max_tx_queues; i++) {
438 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
439 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
440 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
/* Same for the Rx side: clear data and head write-relaxed-ordering */
443 for (i = 0; i < hw->mac.max_rx_queues; i++) {
444 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
445 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
446 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
447 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
450 return IXGBE_SUCCESS;
454 * ixgbe_init_hw_generic - Generic hardware initialization
455 * @hw: pointer to hardware structure
457 * Initialize the hardware by resetting the hardware, filling the bus info
458 * structure and media type, clears all on chip counters, initializes receive
459 * address registers, multicast table, VLAN filter table, calls routine to set
460 * up link and flow control settings, and leaves transmit and receive units
461 * disabled and uninitialized
463 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
/* Generic init: reset the hardware, then run the start sequence only
 * if the reset succeeded.  Returns the first failing status. */
467 DEBUGFUNC("ixgbe_init_hw_generic");
469 /* Reset the hardware */
470 status = hw->mac.ops.reset_hw(hw);
472 if (status == IXGBE_SUCCESS) {
/* (elided: "Start the HW" comment line) */
474 status = hw->mac.ops.start_hw(hw);
481 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
482 * @hw: pointer to hardware structure
484 * Clears all hardware statistics counters by reading them from the hardware
485 * Statistics counters are clear on read.
487 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
/* Clears all HW statistics counters.  The counters are clear-on-read,
 * so simply reading each register (return value discarded) resets it.
 * Register sets differ by MAC generation, hence the mac.type checks. */
491 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
493 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
494 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
495 IXGBE_READ_REG(hw, IXGBE_ERRBC);
496 IXGBE_READ_REG(hw, IXGBE_MSPDC);
497 for (i = 0; i < 8; i++)
498 IXGBE_READ_REG(hw, IXGBE_MPC(i));
500 IXGBE_READ_REG(hw, IXGBE_MLFC);
501 IXGBE_READ_REG(hw, IXGBE_MRFC);
502 IXGBE_READ_REG(hw, IXGBE_RLEC);
503 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
504 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
/* Link XON/XOFF Rx counters moved on 82599 and later */
505 if (hw->mac.type >= ixgbe_mac_82599EB) {
506 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
507 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
509 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
510 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
/* Per-priority (8 TCs) pause counters */
513 for (i = 0; i < 8; i++) {
514 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
515 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
516 if (hw->mac.type >= ixgbe_mac_82599EB) {
517 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
518 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
520 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
521 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
524 if (hw->mac.type >= ixgbe_mac_82599EB)
525 for (i = 0; i < 8; i++)
526 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
/* Packet-size histogram and good Rx/Tx byte/packet counters */
527 IXGBE_READ_REG(hw, IXGBE_PRC64);
528 IXGBE_READ_REG(hw, IXGBE_PRC127);
529 IXGBE_READ_REG(hw, IXGBE_PRC255);
530 IXGBE_READ_REG(hw, IXGBE_PRC511);
531 IXGBE_READ_REG(hw, IXGBE_PRC1023);
532 IXGBE_READ_REG(hw, IXGBE_PRC1522);
533 IXGBE_READ_REG(hw, IXGBE_GPRC);
534 IXGBE_READ_REG(hw, IXGBE_BPRC);
535 IXGBE_READ_REG(hw, IXGBE_MPRC);
536 IXGBE_READ_REG(hw, IXGBE_GPTC);
537 IXGBE_READ_REG(hw, IXGBE_GORCL);
538 IXGBE_READ_REG(hw, IXGBE_GORCH);
539 IXGBE_READ_REG(hw, IXGBE_GOTCL);
540 IXGBE_READ_REG(hw, IXGBE_GOTCH);
541 if (hw->mac.type == ixgbe_mac_82598EB)
542 for (i = 0; i < 8; i++)
543 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
544 IXGBE_READ_REG(hw, IXGBE_RUC);
545 IXGBE_READ_REG(hw, IXGBE_RFC);
546 IXGBE_READ_REG(hw, IXGBE_ROC);
547 IXGBE_READ_REG(hw, IXGBE_RJC);
548 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
549 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
550 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
551 IXGBE_READ_REG(hw, IXGBE_TORL);
552 IXGBE_READ_REG(hw, IXGBE_TORH);
553 IXGBE_READ_REG(hw, IXGBE_TPR);
554 IXGBE_READ_REG(hw, IXGBE_TPT);
555 IXGBE_READ_REG(hw, IXGBE_PTC64);
556 IXGBE_READ_REG(hw, IXGBE_PTC127);
557 IXGBE_READ_REG(hw, IXGBE_PTC255);
558 IXGBE_READ_REG(hw, IXGBE_PTC511);
559 IXGBE_READ_REG(hw, IXGBE_PTC1023);
560 IXGBE_READ_REG(hw, IXGBE_PTC1522);
561 IXGBE_READ_REG(hw, IXGBE_MPTC);
562 IXGBE_READ_REG(hw, IXGBE_BPTC);
/* Per-queue (16 queues) Rx/Tx counters; 82599+ splits bytes into L/H */
563 for (i = 0; i < 16; i++) {
564 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
565 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
566 if (hw->mac.type >= ixgbe_mac_82599EB) {
567 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
568 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
569 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
570 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
571 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
573 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
574 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
/* X540 also has PHY-resident error counters, read via MDIO */
578 if (hw->mac.type == ixgbe_mac_X540) {
580 ixgbe_identify_phy(hw);
581 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
582 IXGBE_MDIO_PCS_DEV_TYPE, &i);
583 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
584 IXGBE_MDIO_PCS_DEV_TYPE, &i);
585 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
586 IXGBE_MDIO_PCS_DEV_TYPE, &i);
587 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
588 IXGBE_MDIO_PCS_DEV_TYPE, &i);
591 return IXGBE_SUCCESS;
595 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
596 * @hw: pointer to hardware structure
597 * @pba_num: stores the part number string from the EEPROM
598 * @pba_num_size: part number string buffer length
600 * Reads the part number string from the EEPROM.
602 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
/* Reads the board part number (PBA) from EEPROM into pba_num as an
 * ASCII string.  Handles both the legacy hex-encoded format and the
 * newer pointer/string-block format (guarded by IXGBE_PBANUM_PTR_GUARD).
 * NOTE(review): several lines are elided in this copy (the second
 * signature line, locals, ret_val checks, the '-' insertion for legacy
 * format, and pba_ptr adjustment before the string loop). */
611 DEBUGFUNC("ixgbe_read_pba_string_generic");
613 if (pba_num == NULL) {
614 DEBUGOUT("PBA string buffer was null\n");
615 return IXGBE_ERR_INVALID_ARGUMENT;
618 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
620 DEBUGOUT("NVM Read Error\n");
624 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
626 DEBUGOUT("NVM Read Error\n");
631 * if data is not ptr guard the PBA must be in legacy format which
632 * means pba_ptr is actually our second data word for the PBA number
633 * and we can decode it into an ascii string
635 if (data != IXGBE_PBANUM_PTR_GUARD) {
636 DEBUGOUT("NVM PBA number is not stored as string\n");
638 /* we will need 11 characters to store the PBA */
639 if (pba_num_size < 11) {
640 DEBUGOUT("PBA string buffer too small\n");
641 return IXGBE_ERR_NO_SPACE;
644 /* extract hex string from data and pba_ptr */
645 pba_num[0] = (data >> 12) & 0xF;
646 pba_num[1] = (data >> 8) & 0xF;
647 pba_num[2] = (data >> 4) & 0xF;
648 pba_num[3] = data & 0xF;
649 pba_num[4] = (pba_ptr >> 12) & 0xF;
650 pba_num[5] = (pba_ptr >> 8) & 0xF;
/* (elided: pba_num[6] = '-' and pba_num[7]; legacy format is XXXXXX-XXX) */
653 pba_num[8] = (pba_ptr >> 4) & 0xF;
654 pba_num[9] = pba_ptr & 0xF;
656 /* put a null character on the end of our string */
/* (elided: pba_num[10] = '\0') */
659 /* switch all the data but the '-' to hex char */
660 for (offset = 0; offset < 10; offset++) {
661 if (pba_num[offset] < 0xA)
662 pba_num[offset] += '0';
663 else if (pba_num[offset] < 0x10)
664 pba_num[offset] += 'A' - 0xA;
667 return IXGBE_SUCCESS;
/* String-block format: first word at pba_ptr is the block length */
670 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
672 DEBUGOUT("NVM Read Error\n");
676 if (length == 0xFFFF || length == 0) {
677 DEBUGOUT("NVM PBA number section invalid length\n");
678 return IXGBE_ERR_PBA_SECTION;
681 /* check if pba_num buffer is big enough */
682 if (pba_num_size < (((u32)length * 2) - 1)) {
683 DEBUGOUT("PBA string buffer too small\n");
684 return IXGBE_ERR_NO_SPACE;
687 /* trim pba length from start of string */
/* (elided: pba_ptr++; length--; skip the length word itself) */
691 for (offset = 0; offset < length; offset++) {
692 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
694 DEBUGOUT("NVM Read Error\n");
/* Each EEPROM word holds two ASCII characters, high byte first */
697 pba_num[offset * 2] = (u8)(data >> 8);
698 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
700 pba_num[offset * 2] = '\0';
702 return IXGBE_SUCCESS;
706 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
707 * @hw: pointer to hardware structure
708 * @pba_num: stores the part number from the EEPROM
710 * Reads the part number from the EEPROM.
712 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
/* Reads the legacy 32-bit PBA number (two EEPROM words).  Fails with
 * IXGBE_NOT_IMPLEMENTED when the EEPROM uses the string format
 * (PTR_GUARD present), since that format cannot fit in a u32. */
717 DEBUGFUNC("ixgbe_read_pba_num_generic");
719 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
721 DEBUGOUT("NVM Read Error\n");
723 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
724 DEBUGOUT("NVM Not supported\n");
725 return IXGBE_NOT_IMPLEMENTED;
/* First word becomes the high 16 bits of the result */
727 *pba_num = (u32)(data << 16);
729 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
731 DEBUGOUT("NVM Read Error\n");
/* (elided: OR the second word into the low 16 bits) */
736 return IXGBE_SUCCESS;
741 * @hw: pointer to the HW structure
742 * @eeprom_buf: optional pointer to EEPROM image
743 * @eeprom_buf_size: size of EEPROM image in words
744 * @max_pba_block_size: PBA block size limit
745 * @pba: pointer to output PBA structure
747 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
748 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
751 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
752 u32 eeprom_buf_size, u16 max_pba_block_size,
753 struct ixgbe_pba *pba)
/* Reads the raw PBA words (and the PBA block, if in string format) either
 * from an in-memory EEPROM image (eeprom_buf != NULL) or from the
 * physical EEPROM device.  NOTE(review): guard clauses, ret_val checks
 * and some locals are elided in this copy. */
759 return IXGBE_ERR_PARAM;
761 if (eeprom_buf == NULL) {
/* Physical device path: read both PBA pointer words at once */
762 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
767 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
768 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
769 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
771 return IXGBE_ERR_PARAM;
/* String format: word[1] points at a length-prefixed PBA block */
775 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
776 if (pba->pba_block == NULL)
777 return IXGBE_ERR_PARAM;
779 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
785 if (pba_block_size > max_pba_block_size)
786 return IXGBE_ERR_PARAM;
788 if (eeprom_buf == NULL) {
789 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
/* Image path: bounds-check the block before copying it out */
795 if (eeprom_buf_size > (u32)(pba->word[1] +
796 pba->pba_block[0])) {
797 memcpy(pba->pba_block,
798 &eeprom_buf[pba->word[1]],
799 pba_block_size * sizeof(u16));
801 return IXGBE_ERR_PARAM;
806 return IXGBE_SUCCESS;
810 * ixgbe_write_pba_raw
811 * @hw: pointer to the HW structure
812 * @eeprom_buf: optional pointer to EEPROM image
813 * @eeprom_buf_size: size of EEPROM image in words
814 * @pba: pointer to PBA structure
816 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
817 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
820 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
821 u32 eeprom_buf_size, struct ixgbe_pba *pba)
/* Mirror of ixgbe_read_pba_raw: writes the PBA words (and PBA block when
 * in string format) to an EEPROM image or the physical device.
 * NOTE(review): guard clauses and ret_val checks are elided here. */
826 return IXGBE_ERR_PARAM;
828 if (eeprom_buf == NULL) {
829 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
834 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
835 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
836 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
838 return IXGBE_ERR_PARAM;
/* String format: also write the PBA block at word[1]'s offset */
842 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
843 if (pba->pba_block == NULL)
844 return IXGBE_ERR_PARAM;
846 if (eeprom_buf == NULL) {
847 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
/* Image path: bounds-check (block length is its own first word) */
853 if (eeprom_buf_size > (u32)(pba->word[1] +
854 pba->pba_block[0])) {
855 memcpy(&eeprom_buf[pba->word[1]],
857 pba->pba_block[0] * sizeof(u16));
859 return IXGBE_ERR_PARAM;
864 return IXGBE_SUCCESS;
868 * ixgbe_get_pba_block_size
869 * @hw: pointer to the HW structure
870 * @eeprom_buf: optional pointer to EEPROM image
871 * @eeprom_buf_size: size of EEPROM image in words
872 * @pba_data_size: pointer to output variable
874 * Returns the size of the PBA block in words. Function operates on EEPROM
875 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
879 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
880 u32 eeprom_buf_size, u16 *pba_block_size)
/* Returns (via pba_block_size) the PBA block length in words, reading
 * from an EEPROM image or the physical device.  Legacy-format EEPROMs
 * have no block; the elided branch reports a fixed legacy size.
 * NOTE(review): locals and ret_val checks are elided in this copy. */
886 DEBUGFUNC("ixgbe_get_pba_block_size");
888 if (eeprom_buf == NULL) {
889 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
894 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
895 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
896 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
898 return IXGBE_ERR_PARAM;
/* String format: the first word of the block is its length */
902 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
903 if (eeprom_buf == NULL) {
904 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
909 if (eeprom_buf_size > pba_word[1])
910 length = eeprom_buf[pba_word[1] + 0];
912 return IXGBE_ERR_PARAM;
/* 0xFFFF = erased EEPROM, 0 = impossible length: both invalid */
915 if (length == 0xFFFF || length == 0)
916 return IXGBE_ERR_PBA_SECTION;
918 /* PBA number in legacy format, there is no PBA Block. */
922 if (pba_block_size != NULL)
923 *pba_block_size = length;
925 return IXGBE_SUCCESS;
929 * ixgbe_get_mac_addr_generic - Generic get MAC address
930 * @hw: pointer to hardware structure
931 * @mac_addr: Adapter MAC address
933 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
934 * A reset of the adapter must be performed prior to calling this function
935 * in order for the MAC address to have been loaded from the EEPROM into RAR0
937 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
/* Extracts the 6-byte MAC address from RAR0: bytes 0-3 come from RAL(0),
 * bytes 4-5 from RAH(0), each register read little-endian byte-wise. */
943 DEBUGFUNC("ixgbe_get_mac_addr_generic");
945 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
946 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
948 for (i = 0; i < 4; i++)
949 mac_addr[i] = (u8)(rar_low >> (i*8));
951 for (i = 0; i < 2; i++)
952 mac_addr[i+4] = (u8)(rar_high >> (i*8));
954 return IXGBE_SUCCESS;
958 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
959 * @hw: pointer to hardware structure
960 * @link_status: the link status returned by the PCI config space
962 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
964 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
/* Decodes the PCIe Link Status register value into bus width and speed
 * fields of hw->bus, then derives the LAN function id.
 * NOTE(review): the "break" statements between cases are elided in this
 * copy of the file; comments only added, code untouched. */
966 struct ixgbe_mac_info *mac = &hw->mac;
968 hw->bus.type = ixgbe_bus_type_pci_express;
/* Negotiated link width (x1/x2/x4/x8) */
970 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
971 case IXGBE_PCI_LINK_WIDTH_1:
972 hw->bus.width = ixgbe_bus_width_pcie_x1;
974 case IXGBE_PCI_LINK_WIDTH_2:
975 hw->bus.width = ixgbe_bus_width_pcie_x2;
977 case IXGBE_PCI_LINK_WIDTH_4:
978 hw->bus.width = ixgbe_bus_width_pcie_x4;
980 case IXGBE_PCI_LINK_WIDTH_8:
981 hw->bus.width = ixgbe_bus_width_pcie_x8;
984 hw->bus.width = ixgbe_bus_width_unknown;
/* Negotiated link speed (2.5/5.0/8.0 GT/s) */
988 switch (link_status & IXGBE_PCI_LINK_SPEED) {
989 case IXGBE_PCI_LINK_SPEED_2500:
990 hw->bus.speed = ixgbe_bus_speed_2500;
992 case IXGBE_PCI_LINK_SPEED_5000:
993 hw->bus.speed = ixgbe_bus_speed_5000;
995 case IXGBE_PCI_LINK_SPEED_8000:
996 hw->bus.speed = ixgbe_bus_speed_8000;
999 hw->bus.speed = ixgbe_bus_speed_unknown;
1003 mac->ops.set_lan_id(hw);
1007 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1008 * @hw: pointer to hardware structure
1010 * Gets the PCI bus info (speed, width, type) then calls helper function to
1011 * store this data within the ixgbe_hw structure.
1013 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
/* Reads the PCIe Link Status word from config space and delegates the
 * decoding/storing to ixgbe_set_pci_config_data_generic(). */
1017 DEBUGFUNC("ixgbe_get_bus_info_generic");
1019 /* Get the negotiated link width and speed from PCI config space */
1020 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1022 ixgbe_set_pci_config_data_generic(hw, link_status);
1024 return IXGBE_SUCCESS;
1028 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1029 * @hw: pointer to the HW structure
1031 * Determines the LAN function id by reading memory-mapped registers
1032 * and swaps the port value if requested.
1034 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
/* Derives the LAN function id from the STATUS register, then checks
 * FACTPS for the "LAN function select" bit which swaps the ports.
 * NOTE(review): the swap assignment after the LFS check is elided. */
1036 struct ixgbe_bus_info *bus = &hw->bus;
1039 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1041 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1042 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1043 bus->lan_id = bus->func;
1045 /* check for a port swap */
1046 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
1047 if (reg & IXGBE_FACTPS_LFS)
1052 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1053 * @hw: pointer to hardware structure
1055 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1056 * disables transmit and receive units. The adapter_stopped flag is used by
1057 * the shared code and drivers to determine if the adapter is in a stopped
1058 * state and should not touch the hardware.
1060 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
/* Quiesces the adapter: marks it stopped, disables Rx, masks and clears
 * interrupts, flushes every Tx and Rx queue, then disables PCIe master
 * access.  Returns the status of the PCIe-master disable. */
1065 DEBUGFUNC("ixgbe_stop_adapter_generic");
1068 * Set the adapter_stopped flag so other driver functions stop touching
1071 hw->adapter_stopped = TRUE;
1073 /* Disable the receive unit */
1074 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
1076 /* Clear interrupt mask to stop interrupts from being generated */
1077 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1079 /* Clear any pending interrupts, flush previous writes */
1080 IXGBE_READ_REG(hw, IXGBE_EICR);
1082 /* Disable the transmit unit. Each queue must be disabled. */
1083 for (i = 0; i < hw->mac.max_tx_queues; i++)
1084 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1086 /* Disable the receive unit by stopping each queue */
1087 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1088 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1089 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1090 reg_val |= IXGBE_RXDCTL_SWFLSH;
1091 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1094 /* flush all queues disables */
1095 IXGBE_WRITE_FLUSH(hw);
1099 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
1100 * access and verify no pending requests
1102 return ixgbe_disable_pcie_master(hw);
1106 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1107 * @hw: pointer to hardware structure
1108 * @index: led number to turn on
1110 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
/* Forces LED `index` on via LEDCTL: clear its mode field, write ON. */
1112 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1114 DEBUGFUNC("ixgbe_led_on_generic");
1116 /* To turn on the LED, set mode to ON. */
1117 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1118 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1119 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1120 IXGBE_WRITE_FLUSH(hw);
1122 return IXGBE_SUCCESS;
1126 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1127 * @hw: pointer to hardware structure
1128 * @index: led number to turn off
1130 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
/* Forces LED `index` off via LEDCTL: clear its mode field, write OFF. */
1132 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1134 DEBUGFUNC("ixgbe_led_off_generic");
1136 /* To turn off the LED, set mode to OFF. */
1137 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1138 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1139 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1140 IXGBE_WRITE_FLUSH(hw);
1142 return IXGBE_SUCCESS;
1146 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1147 * @hw: pointer to hardware structure
1149 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1150 * ixgbe_hw struct in order to set up EEPROM access.
1152 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
/* One-time EEPROM parameter discovery: detects presence via EEC.PRES,
 * derives word size from EEC.SIZE, and address width from EEC.ADDR_SIZE.
 * Idempotent: does nothing once eeprom->type leaves "uninitialized".
 * NOTE(review): locals, the "else" arm for address_bits, and closing
 * braces are elided in this copy; comments only added. */
1154 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1158 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1160 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1161 eeprom->type = ixgbe_eeprom_none;
1162 /* Set default semaphore delay to 10ms which is a well
1164 eeprom->semaphore_delay = 10;
1165 /* Clear EEPROM page size, it will be initialized as needed */
1166 eeprom->word_page_size = 0;
1169 * Check for EEPROM present first.
1170 * If not present leave as none
1172 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1173 if (eec & IXGBE_EEC_PRES) {
1174 eeprom->type = ixgbe_eeprom_spi;
1177 * SPI EEPROM is assumed here. This code would need to
1178 * change if a future EEPROM is not SPI.
1180 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1181 IXGBE_EEC_SIZE_SHIFT);
1182 eeprom->word_size = 1 << (eeprom_size +
1183 IXGBE_EEPROM_WORD_SIZE_SHIFT);
/* 16-bit addressing for larger parts, 8-bit otherwise */
1186 if (eec & IXGBE_EEC_ADDR_SIZE)
1187 eeprom->address_bits = 16;
1189 eeprom->address_bits = 8;
1190 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1191 "%d\n", eeprom->type, eeprom->word_size,
1192 eeprom->address_bits);
1195 return IXGBE_SUCCESS;
1199 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1200 * @hw: pointer to hardware structure
1201 * @offset: offset within the EEPROM to write
1202 * @words: number of word(s)
1203 * @data: 16 bit word(s) to write to EEPROM
1205 * Reads 16 bit word(s) from EEPROM through bit-bang method
1207 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1208 u16 words, u16 *data)
1210 s32 status = IXGBE_SUCCESS;
1213 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1215 hw->eeprom.ops.init_params(hw);
1218 status = IXGBE_ERR_INVALID_ARGUMENT;
1222 if (offset + words > hw->eeprom.word_size) {
1223 status = IXGBE_ERR_EEPROM;
1228 * The EEPROM page size cannot be queried from the chip. We do lazy
1229 * initialization. It is worth to do that when we write large buffer.
1231 if ((hw->eeprom.word_page_size == 0) &&
1232 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1233 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1236 * We cannot hold synchronization semaphores for too long
1237 * to avoid other entity starvation. However it is more efficient
1238 * to read in bursts than synchronizing access for each word.
1240 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1241 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1242 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1243 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1246 if (status != IXGBE_SUCCESS)
1255 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1256 * @hw: pointer to hardware structure
1257 * @offset: offset within the EEPROM to be written to
1258 * @words: number of word(s)
1259 * @data: 16 bit word(s) to be written to the EEPROM
1261 * If ixgbe_eeprom_update_checksum is not called after this function, the
1262 * EEPROM will most likely contain an invalid checksum.
1264 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1265 u16 words, u16 *data)
1271 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1273 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1275 /* Prepare the EEPROM for writing */
1276 status = ixgbe_acquire_eeprom(hw);
1278 if (status == IXGBE_SUCCESS) {
1279 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1280 ixgbe_release_eeprom(hw);
1281 status = IXGBE_ERR_EEPROM;
1285 if (status == IXGBE_SUCCESS) {
1286 for (i = 0; i < words; i++) {
1287 ixgbe_standby_eeprom(hw);
1289 /* Send the WRITE ENABLE command (8 bit opcode ) */
1290 ixgbe_shift_out_eeprom_bits(hw,
1291 IXGBE_EEPROM_WREN_OPCODE_SPI,
1292 IXGBE_EEPROM_OPCODE_BITS);
1294 ixgbe_standby_eeprom(hw);
1297 * Some SPI eeproms use the 8th address bit embedded
1300 if ((hw->eeprom.address_bits == 8) &&
1301 ((offset + i) >= 128))
1302 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1304 /* Send the Write command (8-bit opcode + addr) */
1305 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1306 IXGBE_EEPROM_OPCODE_BITS);
1307 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1308 hw->eeprom.address_bits);
1310 page_size = hw->eeprom.word_page_size;
1312 /* Send the data in burst via SPI*/
1315 word = (word >> 8) | (word << 8);
1316 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1321 /* do not wrap around page */
1322 if (((offset + i) & (page_size - 1)) ==
1325 } while (++i < words);
1327 ixgbe_standby_eeprom(hw);
1330 /* Done with writing - release the EEPROM */
1331 ixgbe_release_eeprom(hw);
1338 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1339 * @hw: pointer to hardware structure
1340 * @offset: offset within the EEPROM to be written to
1341 * @data: 16 bit word to be written to the EEPROM
1343 * If ixgbe_eeprom_update_checksum is not called after this function, the
1344 * EEPROM will most likely contain an invalid checksum.
1346 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1350 DEBUGFUNC("ixgbe_write_eeprom_generic");
1352 hw->eeprom.ops.init_params(hw);
1354 if (offset >= hw->eeprom.word_size) {
1355 status = IXGBE_ERR_EEPROM;
1359 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1366 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1367 * @hw: pointer to hardware structure
1368 * @offset: offset within the EEPROM to be read
1369 * @data: read 16 bit words(s) from EEPROM
1370 * @words: number of word(s)
1372 * Reads 16 bit word(s) from EEPROM through bit-bang method
1374 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1375 u16 words, u16 *data)
1377 s32 status = IXGBE_SUCCESS;
1380 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1382 hw->eeprom.ops.init_params(hw);
1385 status = IXGBE_ERR_INVALID_ARGUMENT;
1389 if (offset + words > hw->eeprom.word_size) {
1390 status = IXGBE_ERR_EEPROM;
1395 * We cannot hold synchronization semaphores for too long
1396 * to avoid other entity starvation. However it is more efficient
1397 * to read in bursts than synchronizing access for each word.
1399 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1400 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1401 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1403 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1406 if (status != IXGBE_SUCCESS)
1415 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1416 * @hw: pointer to hardware structure
1417 * @offset: offset within the EEPROM to be read
1418 * @words: number of word(s)
1419 * @data: read 16 bit word(s) from EEPROM
1421 * Reads 16 bit word(s) from EEPROM through bit-bang method
1423 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1424 u16 words, u16 *data)
1428 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1431 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1433 /* Prepare the EEPROM for reading */
1434 status = ixgbe_acquire_eeprom(hw);
1436 if (status == IXGBE_SUCCESS) {
1437 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1438 ixgbe_release_eeprom(hw);
1439 status = IXGBE_ERR_EEPROM;
1443 if (status == IXGBE_SUCCESS) {
1444 for (i = 0; i < words; i++) {
1445 ixgbe_standby_eeprom(hw);
1447 * Some SPI eeproms use the 8th address bit embedded
1450 if ((hw->eeprom.address_bits == 8) &&
1451 ((offset + i) >= 128))
1452 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1454 /* Send the READ command (opcode + addr) */
1455 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1456 IXGBE_EEPROM_OPCODE_BITS);
1457 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1458 hw->eeprom.address_bits);
1460 /* Read the data. */
1461 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1462 data[i] = (word_in >> 8) | (word_in << 8);
1465 /* End this read operation */
1466 ixgbe_release_eeprom(hw);
1473 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1474 * @hw: pointer to hardware structure
1475 * @offset: offset within the EEPROM to be read
1476 * @data: read 16 bit value from EEPROM
1478 * Reads 16 bit value from EEPROM through bit-bang method
1480 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1485 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1487 hw->eeprom.ops.init_params(hw);
1489 if (offset >= hw->eeprom.word_size) {
1490 status = IXGBE_ERR_EEPROM;
1494 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1501 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1502 * @hw: pointer to hardware structure
1503 * @offset: offset of word in the EEPROM to read
1504 * @words: number of word(s)
1505 * @data: 16 bit word(s) from the EEPROM
1507 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1509 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1510 u16 words, u16 *data)
1513 s32 status = IXGBE_SUCCESS;
1516 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1518 hw->eeprom.ops.init_params(hw);
1521 status = IXGBE_ERR_INVALID_ARGUMENT;
1522 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1526 if (offset >= hw->eeprom.word_size) {
1527 status = IXGBE_ERR_EEPROM;
1528 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1532 for (i = 0; i < words; i++) {
1533 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1534 IXGBE_EEPROM_RW_REG_START;
1536 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1537 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1539 if (status == IXGBE_SUCCESS) {
1540 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1541 IXGBE_EEPROM_RW_REG_DATA);
1543 DEBUGOUT("Eeprom read timed out\n");
1552 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1553 * @hw: pointer to hardware structure
1554 * @offset: offset within the EEPROM to be used as a scratch pad
1556 * Discover EEPROM page size by writing marching data at given offset.
1557 * This function is called only when we are writing a new large buffer
1558 * at given offset so the data would be overwritten anyway.
1560 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1563 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1564 s32 status = IXGBE_SUCCESS;
1567 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1569 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1572 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1573 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1574 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1575 hw->eeprom.word_page_size = 0;
1576 if (status != IXGBE_SUCCESS)
1579 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1580 if (status != IXGBE_SUCCESS)
1584 * When writing in burst more than the actual page size
1585 * EEPROM address wraps around current page.
1587 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1589 DEBUGOUT1("Detected EEPROM page size = %d words.",
1590 hw->eeprom.word_page_size);
1596 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1597 * @hw: pointer to hardware structure
1598 * @offset: offset of word in the EEPROM to read
1599 * @data: word read from the EEPROM
1601 * Reads a 16 bit word from the EEPROM using the EERD register.
1603 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1605 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1609 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1610 * @hw: pointer to hardware structure
1611 * @offset: offset of word in the EEPROM to write
1612 * @words: number of word(s)
1613 * @data: word(s) write to the EEPROM
1615 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1617 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1618 u16 words, u16 *data)
1621 s32 status = IXGBE_SUCCESS;
1624 DEBUGFUNC("ixgbe_write_eewr_generic");
1626 hw->eeprom.ops.init_params(hw);
1629 status = IXGBE_ERR_INVALID_ARGUMENT;
1630 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1634 if (offset >= hw->eeprom.word_size) {
1635 status = IXGBE_ERR_EEPROM;
1636 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1640 for (i = 0; i < words; i++) {
1641 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1642 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1643 IXGBE_EEPROM_RW_REG_START;
1645 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1646 if (status != IXGBE_SUCCESS) {
1647 DEBUGOUT("Eeprom write EEWR timed out\n");
1651 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1653 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1654 if (status != IXGBE_SUCCESS) {
1655 DEBUGOUT("Eeprom write EEWR timed out\n");
1665 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1666 * @hw: pointer to hardware structure
1667 * @offset: offset of word in the EEPROM to write
1668 * @data: word write to the EEPROM
1670 * Write a 16 bit word to the EEPROM using the EEWR register.
1672 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1674 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1678 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1679 * @hw: pointer to hardware structure
1680 * @ee_reg: EEPROM flag for polling
1682 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1683 * read or write is done respectively.
1685 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1689 s32 status = IXGBE_ERR_EEPROM;
1691 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1693 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1694 if (ee_reg == IXGBE_NVM_POLL_READ)
1695 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1697 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1699 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1700 status = IXGBE_SUCCESS;
1706 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1707 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1708 "EEPROM read/write done polling timed out");
1714 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1715 * @hw: pointer to hardware structure
1717 * Prepares EEPROM for access using bit-bang method. This function should
1718 * be called before issuing a command to the EEPROM.
1720 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1722 s32 status = IXGBE_SUCCESS;
1726 DEBUGFUNC("ixgbe_acquire_eeprom");
1728 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1730 status = IXGBE_ERR_SWFW_SYNC;
1732 if (status == IXGBE_SUCCESS) {
1733 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1735 /* Request EEPROM Access */
1736 eec |= IXGBE_EEC_REQ;
1737 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1739 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1740 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1741 if (eec & IXGBE_EEC_GNT)
1746 /* Release if grant not acquired */
1747 if (!(eec & IXGBE_EEC_GNT)) {
1748 eec &= ~IXGBE_EEC_REQ;
1749 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1750 DEBUGOUT("Could not acquire EEPROM grant\n");
1752 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1753 status = IXGBE_ERR_EEPROM;
1756 /* Setup EEPROM for Read/Write */
1757 if (status == IXGBE_SUCCESS) {
1758 /* Clear CS and SK */
1759 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1760 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1761 IXGBE_WRITE_FLUSH(hw);
1769 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1770 * @hw: pointer to hardware structure
1772 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1774 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1776 s32 status = IXGBE_ERR_EEPROM;
1781 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1784 /* Get SMBI software semaphore between device drivers first */
1785 for (i = 0; i < timeout; i++) {
1787 * If the SMBI bit is 0 when we read it, then the bit will be
1788 * set and we have the semaphore
1790 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1791 if (!(swsm & IXGBE_SWSM_SMBI)) {
1792 status = IXGBE_SUCCESS;
1799 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1802 * this release is particularly important because our attempts
1803 * above to get the semaphore may have succeeded, and if there
1804 * was a timeout, we should unconditionally clear the semaphore
1805 * bits to free the driver to make progress
1807 ixgbe_release_eeprom_semaphore(hw);
1812 * If the SMBI bit is 0 when we read it, then the bit will be
1813 * set and we have the semaphore
1815 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1816 if (!(swsm & IXGBE_SWSM_SMBI))
1817 status = IXGBE_SUCCESS;
1820 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1821 if (status == IXGBE_SUCCESS) {
1822 for (i = 0; i < timeout; i++) {
1823 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1825 /* Set the SW EEPROM semaphore bit to request access */
1826 swsm |= IXGBE_SWSM_SWESMBI;
1827 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1830 * If we set the bit successfully then we got the
1833 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1834 if (swsm & IXGBE_SWSM_SWESMBI)
1841 * Release semaphores and return error if SW EEPROM semaphore
1842 * was not granted because we don't have access to the EEPROM
1845 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1846 "SWESMBI Software EEPROM semaphore not granted.\n");
1847 ixgbe_release_eeprom_semaphore(hw);
1848 status = IXGBE_ERR_EEPROM;
1851 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1852 "Software semaphore SMBI between device drivers "
1860 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1861 * @hw: pointer to hardware structure
1863 * This function clears hardware semaphore bits.
1865 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1869 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1871 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1873 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1874 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1875 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1876 IXGBE_WRITE_FLUSH(hw);
1880 * ixgbe_ready_eeprom - Polls for EEPROM ready
1881 * @hw: pointer to hardware structure
1883 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1885 s32 status = IXGBE_SUCCESS;
1889 DEBUGFUNC("ixgbe_ready_eeprom");
1892 * Read "Status Register" repeatedly until the LSB is cleared. The
1893 * EEPROM will signal that the command has been completed by clearing
1894 * bit 0 of the internal status register. If it's not cleared within
1895 * 5 milliseconds, then error out.
1897 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1898 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1899 IXGBE_EEPROM_OPCODE_BITS);
1900 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1901 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1905 ixgbe_standby_eeprom(hw);
1909 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1910 * devices (and only 0-5mSec on 5V devices)
1912 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1913 DEBUGOUT("SPI EEPROM Status error\n");
1914 status = IXGBE_ERR_EEPROM;
1921 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1922 * @hw: pointer to hardware structure
1924 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1928 DEBUGFUNC("ixgbe_standby_eeprom");
1930 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1932 /* Toggle CS to flush commands */
1933 eec |= IXGBE_EEC_CS;
1934 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1935 IXGBE_WRITE_FLUSH(hw);
1937 eec &= ~IXGBE_EEC_CS;
1938 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1939 IXGBE_WRITE_FLUSH(hw);
1944 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1945 * @hw: pointer to hardware structure
1946 * @data: data to send to the EEPROM
1947 * @count: number of bits to shift out
1949 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1956 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1958 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1961 * Mask is used to shift "count" bits of "data" out to the EEPROM
1962 * one bit at a time. Determine the starting bit based on count
1964 mask = 0x01 << (count - 1);
1966 for (i = 0; i < count; i++) {
1968 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1969 * "1", and then raising and then lowering the clock (the SK
1970 * bit controls the clock input to the EEPROM). A "0" is
1971 * shifted out to the EEPROM by setting "DI" to "0" and then
1972 * raising and then lowering the clock.
1975 eec |= IXGBE_EEC_DI;
1977 eec &= ~IXGBE_EEC_DI;
1979 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1980 IXGBE_WRITE_FLUSH(hw);
1984 ixgbe_raise_eeprom_clk(hw, &eec);
1985 ixgbe_lower_eeprom_clk(hw, &eec);
1988 * Shift mask to signify next bit of data to shift in to the
1994 /* We leave the "DI" bit set to "0" when we leave this routine. */
1995 eec &= ~IXGBE_EEC_DI;
1996 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1997 IXGBE_WRITE_FLUSH(hw);
2001 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2002 * @hw: pointer to hardware structure
2004 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2010 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2013 * In order to read a register from the EEPROM, we need to shift
2014 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2015 * the clock input to the EEPROM (setting the SK bit), and then reading
2016 * the value of the "DO" bit. During this "shifting in" process the
2017 * "DI" bit should always be clear.
2019 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2021 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2023 for (i = 0; i < count; i++) {
2025 ixgbe_raise_eeprom_clk(hw, &eec);
2027 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2029 eec &= ~(IXGBE_EEC_DI);
2030 if (eec & IXGBE_EEC_DO)
2033 ixgbe_lower_eeprom_clk(hw, &eec);
2040 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2041 * @hw: pointer to hardware structure
2042 * @eec: EEC register's current value
2044 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2046 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2049 * Raise the clock input to the EEPROM
2050 * (setting the SK bit), then delay
2052 *eec = *eec | IXGBE_EEC_SK;
2053 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2054 IXGBE_WRITE_FLUSH(hw);
2059 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2060 * @hw: pointer to hardware structure
2061 * @eecd: EECD's current value
2063 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2065 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2068 * Lower the clock input to the EEPROM (clearing the SK bit), then
2071 *eec = *eec & ~IXGBE_EEC_SK;
2072 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2073 IXGBE_WRITE_FLUSH(hw);
2078 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2079 * @hw: pointer to hardware structure
2081 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2085 DEBUGFUNC("ixgbe_release_eeprom");
2087 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2089 eec |= IXGBE_EEC_CS; /* Pull CS high */
2090 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2092 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2093 IXGBE_WRITE_FLUSH(hw);
2097 /* Stop requesting EEPROM access */
2098 eec &= ~IXGBE_EEC_REQ;
2099 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2101 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2103 /* Delay before attempt to obtain semaphore again to allow FW access */
2104 msec_delay(hw->eeprom.semaphore_delay);
2108 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2109 * @hw: pointer to hardware structure
2111 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2120 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2122 /* Include 0x0-0x3F in the checksum */
2123 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2124 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
2125 DEBUGOUT("EEPROM read failed\n");
2131 /* Include all data from pointers except for the fw pointer */
2132 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2133 hw->eeprom.ops.read(hw, i, &pointer);
2135 /* Make sure the pointer seems valid */
2136 if (pointer != 0xFFFF && pointer != 0) {
2137 hw->eeprom.ops.read(hw, pointer, &length);
2139 if (length != 0xFFFF && length != 0) {
2140 for (j = pointer+1; j <= pointer+length; j++) {
2141 hw->eeprom.ops.read(hw, j, &word);
2148 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2154 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2155 * @hw: pointer to hardware structure
2156 * @checksum_val: calculated checksum
2158 * Performs checksum calculation and validates the EEPROM checksum. If the
2159 * caller does not need checksum_val, the value can be NULL.
2161 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2166 u16 read_checksum = 0;
2168 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2171 * Read the first word from the EEPROM. If this times out or fails, do
2172 * not continue or we could be in for a very long wait while every
2175 status = hw->eeprom.ops.read(hw, 0, &checksum);
2177 if (status == IXGBE_SUCCESS) {
2178 checksum = hw->eeprom.ops.calc_checksum(hw);
2180 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2183 * Verify read checksum from EEPROM is the same as
2184 * calculated checksum
2186 if (read_checksum != checksum)
2187 status = IXGBE_ERR_EEPROM_CHECKSUM;
2189 /* If the user cares, return the calculated checksum */
2191 *checksum_val = checksum;
2193 DEBUGOUT("EEPROM read failed\n");
2200 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2201 * @hw: pointer to hardware structure
2203 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2208 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2211 * Read the first word from the EEPROM. If this times out or fails, do
2212 * not continue or we could be in for a very long wait while every
2215 status = hw->eeprom.ops.read(hw, 0, &checksum);
2217 if (status == IXGBE_SUCCESS) {
2218 checksum = hw->eeprom.ops.calc_checksum(hw);
2219 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
2222 DEBUGOUT("EEPROM read failed\n");
2229 * ixgbe_validate_mac_addr - Validate MAC address
2230 * @mac_addr: pointer to MAC address.
2232 * Tests a MAC address to ensure it is a valid Individual Address
2234 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2236 s32 status = IXGBE_SUCCESS;
2238 DEBUGFUNC("ixgbe_validate_mac_addr");
2240 /* Make sure it is not a multicast address */
2241 if (IXGBE_IS_MULTICAST(mac_addr)) {
2242 DEBUGOUT("MAC address is multicast\n");
2243 status = IXGBE_ERR_INVALID_MAC_ADDR;
2244 /* Not a broadcast address */
2245 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2246 DEBUGOUT("MAC address is broadcast\n");
2247 status = IXGBE_ERR_INVALID_MAC_ADDR;
2248 /* Reject the zero address */
2249 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2250 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2251 DEBUGOUT("MAC address is all zeros\n");
2252 status = IXGBE_ERR_INVALID_MAC_ADDR;
2258 * ixgbe_set_rar_generic - Set Rx address register
2259 * @hw: pointer to hardware structure
2260 * @index: Receive address register to write
2261 * @addr: Address to put into receive address register
2262 * @vmdq: VMDq "set" or "pool" index
2263 * @enable_addr: set flag that address is active
2265 * Puts an ethernet address into a receive address register.
2267 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2270 u32 rar_low, rar_high;
2271 u32 rar_entries = hw->mac.num_rar_entries;
2273 DEBUGFUNC("ixgbe_set_rar_generic");
2275 /* Make sure we are using a valid rar index range */
2276 if (index >= rar_entries) {
2277 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2278 "RAR index %d is out of range.\n", index);
2279 return IXGBE_ERR_INVALID_ARGUMENT;
2282 /* setup VMDq pool selection before this RAR gets enabled */
2283 hw->mac.ops.set_vmdq(hw, index, vmdq);
2286 * HW expects these in little endian so we reverse the byte
2287 * order from network order (big endian) to little endian
2289 rar_low = ((u32)addr[0] |
2290 ((u32)addr[1] << 8) |
2291 ((u32)addr[2] << 16) |
2292 ((u32)addr[3] << 24));
2294 * Some parts put the VMDq setting in the extra RAH bits,
2295 * so save everything except the lower 16 bits that hold part
2296 * of the address and the address valid bit.
2298 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2299 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2300 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2302 if (enable_addr != 0)
2303 rar_high |= IXGBE_RAH_AV;
2305 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2306 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2308 return IXGBE_SUCCESS;
2312 * ixgbe_clear_rar_generic - Remove Rx address register
2313 * @hw: pointer to hardware structure
2314 * @index: Receive address register to write
2316 * Clears an ethernet address from a receive address register.
2318 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2321 u32 rar_entries = hw->mac.num_rar_entries;
2323 DEBUGFUNC("ixgbe_clear_rar_generic");
2325 /* Make sure we are using a valid rar index range */
2326 if (index >= rar_entries) {
2327 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2328 "RAR index %d is out of range.\n", index);
2329 return IXGBE_ERR_INVALID_ARGUMENT;
2333 * Some parts put the VMDq setting in the extra RAH bits,
2334 * so save everything except the lower 16 bits that hold part
2335 * of the address and the address valid bit.
2337 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2338 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2340 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2341 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2343 /* clear VMDq pool/queue selection for this RAR */
2344 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2346 return IXGBE_SUCCESS;
2350 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2351 * @hw: pointer to hardware structure
2353 * Places the MAC address in receive address register 0 and clears the rest
2354 * of the receive address registers. Clears the multicast table. Assumes
2355 * the receiver is in reset when the routine is called.
2357 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2360 u32 rar_entries = hw->mac.num_rar_entries;
2362 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2365 * If the current mac address is valid, assume it is a software override
2366 * to the permanent address.
2367 * Otherwise, use the permanent address from the eeprom.
2369 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2370 IXGBE_ERR_INVALID_MAC_ADDR) {
2371 /* Get the MAC address from the RAR0 for later reference */
2372 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2374 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2375 hw->mac.addr[0], hw->mac.addr[1],
2377 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2378 hw->mac.addr[4], hw->mac.addr[5]);
2380 /* Setup the receive address. */
2381 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2382 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2383 hw->mac.addr[0], hw->mac.addr[1],
2385 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2386 hw->mac.addr[4], hw->mac.addr[5]);
2388 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2390 /* clear VMDq pool/queue selection for RAR 0 */
2391 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2393 hw->addr_ctrl.overflow_promisc = 0;
2395 hw->addr_ctrl.rar_used_count = 1;
2397 /* Zero out the other receive addresses. */
2398 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2399 for (i = 1; i < rar_entries; i++) {
2400 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2401 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2405 hw->addr_ctrl.mta_in_use = 0;
2406 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2408 DEBUGOUT(" Clearing MTA\n");
2409 for (i = 0; i < hw->mac.mcft_size; i++)
2410 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2412 ixgbe_init_uta_tables(hw);
2414 return IXGBE_SUCCESS;
2418 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2419 * @hw: pointer to hardware structure
2420 * @addr: new address
2422 * Adds it to unused receive address register or goes into promiscuous mode.
2424 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2426 u32 rar_entries = hw->mac.num_rar_entries;
2429 DEBUGFUNC("ixgbe_add_uc_addr");
2431 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2432 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2435 * Place this address in the RAR if there is room,
2436 * else put the controller into promiscuous mode
2438 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2439 rar = hw->addr_ctrl.rar_used_count;
2440 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2441 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2442 hw->addr_ctrl.rar_used_count++;
2444 hw->addr_ctrl.overflow_promisc++;
2447 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2451 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2452 * @hw: pointer to hardware structure
2453 * @addr_list: the list of new addresses
2454 * @addr_count: number of addresses
2455 * @next: iterator function to walk the address list
2457 * The given list replaces any existing list. Clears the secondary addrs from
2458 * receive address registers. Uses unused receive address registers for the
2459 * first secondary addresses, and falls back to promiscuous mode as needed.
2461 * Drivers using secondary unicast addresses must set user_set_promisc when
2462 * manually putting the device into promiscuous mode.
2464 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2465 u32 addr_count, ixgbe_mc_addr_itr next)
2469 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2474 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2477 * Clear accounting of old secondary address list,
2478 * don't count RAR[0]
2480 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2481 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2482 hw->addr_ctrl.overflow_promisc = 0;
2484 /* Zero out the other receive addresses */
2485 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2486 for (i = 0; i < uc_addr_in_use; i++) {
2487 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2488 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2491 /* Add the new addresses */
2492 for (i = 0; i < addr_count; i++) {
2493 DEBUGOUT(" Adding the secondary addresses:\n");
2494 addr = next(hw, &addr_list, &vmdq);
2495 ixgbe_add_uc_addr(hw, addr, vmdq);
2498 if (hw->addr_ctrl.overflow_promisc) {
2499 /* enable promisc if not already in overflow or set by user */
2500 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2501 DEBUGOUT(" Entering address overflow promisc mode\n");
2502 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2503 fctrl |= IXGBE_FCTRL_UPE;
2504 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2507 /* only disable if set by overflow, not by user */
2508 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2509 DEBUGOUT(" Leaving address overflow promisc mode\n");
2510 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2511 fctrl &= ~IXGBE_FCTRL_UPE;
2512 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2516 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2517 return IXGBE_SUCCESS;
2521 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2522 * @hw: pointer to hardware structure
2523 * @mc_addr: the multicast address
2525 * Extracts the 12 bits, from a multicast address, to determine which
2526 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2527 * incoming rx multicast addresses, to determine the bit-vector to check in
2528 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2529 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2530 * to mc_filter_type.
2532 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2536 DEBUGFUNC("ixgbe_mta_vector");
2538 switch (hw->mac.mc_filter_type) {
2539 case 0: /* use bits [47:36] of the address */
2540 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2542 case 1: /* use bits [46:35] of the address */
2543 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2545 case 2: /* use bits [45:34] of the address */
2546 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2548 case 3: /* use bits [43:32] of the address */
2549 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2551 default: /* Invalid mc_filter_type */
2552 DEBUGOUT("MC filter type param set incorrectly\n");
2557 /* vector can only be 12-bits or boundary will be exceeded */
2563 * ixgbe_set_mta - Set bit-vector in multicast table
2564 * @hw: pointer to hardware structure
2565 * @hash_value: Multicast address hash value
2567 * Sets the bit-vector in the multicast table.
2569 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2575 DEBUGFUNC("ixgbe_set_mta");
2577 hw->addr_ctrl.mta_in_use++;
2579 vector = ixgbe_mta_vector(hw, mc_addr);
2580 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2583 * The MTA is a register array of 128 32-bit registers. It is treated
2584 * like an array of 4096 bits. We want to set bit
2585 * BitArray[vector_value]. So we figure out what register the bit is
2586 * in, read it, OR in the new bit, then write back the new value. The
2587 * register is determined by the upper 7 bits of the vector value and
2588 * the bit within that register are determined by the lower 5 bits of
2591 vector_reg = (vector >> 5) & 0x7F;
2592 vector_bit = vector & 0x1F;
2593 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2597 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2598 * @hw: pointer to hardware structure
2599 * @mc_addr_list: the list of new multicast addresses
2600 * @mc_addr_count: number of addresses
2601 * @next: iterator function to walk the multicast address list
2602 * @clear: flag, when set clears the table beforehand
2604 * When the clear flag is set, the given list replaces any existing list.
2605 * Hashes the given addresses into the multicast table.
2607 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2608 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2614 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2617 * Set the new number of MC addresses that we are being requested to
2620 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2621 hw->addr_ctrl.mta_in_use = 0;
2623 /* Clear mta_shadow */
2625 DEBUGOUT(" Clearing MTA\n");
2626 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2629 /* Update mta_shadow */
2630 for (i = 0; i < mc_addr_count; i++) {
2631 DEBUGOUT(" Adding the multicast addresses:\n");
2632 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2636 for (i = 0; i < hw->mac.mcft_size; i++)
2637 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2638 hw->mac.mta_shadow[i]);
2640 if (hw->addr_ctrl.mta_in_use > 0)
2641 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2642 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2644 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2645 return IXGBE_SUCCESS;
2649 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2650 * @hw: pointer to hardware structure
2652 * Enables multicast address in RAR and the use of the multicast hash table.
2654 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2656 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2658 DEBUGFUNC("ixgbe_enable_mc_generic");
2660 if (a->mta_in_use > 0)
2661 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2662 hw->mac.mc_filter_type);
2664 return IXGBE_SUCCESS;
2668 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2669 * @hw: pointer to hardware structure
2671 * Disables multicast address in RAR and the use of the multicast hash table.
2673 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2675 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2677 DEBUGFUNC("ixgbe_disable_mc_generic");
2679 if (a->mta_in_use > 0)
2680 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2682 return IXGBE_SUCCESS;
2686 * ixgbe_fc_enable_generic - Enable flow control
2687 * @hw: pointer to hardware structure
2689 * Enable flow control according to the current settings.
2691 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2693 s32 ret_val = IXGBE_SUCCESS;
2694 u32 mflcn_reg, fccfg_reg;
2699 DEBUGFUNC("ixgbe_fc_enable_generic");
2701 /* Validate the water mark configuration */
2702 if (!hw->fc.pause_time) {
2703 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2707 /* Low water mark of zero causes XOFF floods */
2708 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2709 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2710 hw->fc.high_water[i]) {
2711 if (!hw->fc.low_water[i] ||
2712 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2713 DEBUGOUT("Invalid water mark configuration\n");
2714 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2720 /* Negotiate the fc mode to use */
2721 ixgbe_fc_autoneg(hw);
2723 /* Disable any previous flow control settings */
2724 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2725 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2727 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2728 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2731 * The possible values of fc.current_mode are:
2732 * 0: Flow control is completely disabled
2733 * 1: Rx flow control is enabled (we can receive pause frames,
2734 * but not send pause frames).
2735 * 2: Tx flow control is enabled (we can send pause frames but
2736 * we do not support receiving pause frames).
2737 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2740 switch (hw->fc.current_mode) {
2743 * Flow control is disabled by software override or autoneg.
2744 * The code below will actually disable it in the HW.
2747 case ixgbe_fc_rx_pause:
2749 * Rx Flow control is enabled and Tx Flow control is
2750 * disabled by software override. Since there really
2751 * isn't a way to advertise that we are capable of RX
2752 * Pause ONLY, we will advertise that we support both
2753 * symmetric and asymmetric Rx PAUSE. Later, we will
2754 * disable the adapter's ability to send PAUSE frames.
2756 mflcn_reg |= IXGBE_MFLCN_RFCE;
2758 case ixgbe_fc_tx_pause:
2760 * Tx Flow control is enabled, and Rx Flow control is
2761 * disabled by software override.
2763 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2766 /* Flow control (both Rx and Tx) is enabled by SW override. */
2767 mflcn_reg |= IXGBE_MFLCN_RFCE;
2768 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2771 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2772 "Flow control param set incorrectly\n");
2773 ret_val = IXGBE_ERR_CONFIG;
2778 /* Set 802.3x based flow control settings. */
2779 mflcn_reg |= IXGBE_MFLCN_DPF;
2780 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2781 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2784 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2785 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2786 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2787 hw->fc.high_water[i]) {
2788 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2789 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2790 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2792 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2794 * In order to prevent Tx hangs when the internal Tx
2795 * switch is enabled we must set the high water mark
2796 * to the maximum FCRTH value. This allows the Tx
2797 * switch to function even under heavy Rx workloads.
2799 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2802 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2805 /* Configure pause time (2 TCs per register) */
2806 reg = hw->fc.pause_time * 0x00010001;
2807 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2808 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2810 /* Configure flow control refresh threshold value */
2811 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2818 * ixgbe_negotiate_fc - Negotiate flow control
2819 * @hw: pointer to hardware structure
2820 * @adv_reg: flow control advertised settings
2821 * @lp_reg: link partner's flow control settings
2822 * @adv_sym: symmetric pause bit in advertisement
2823 * @adv_asm: asymmetric pause bit in advertisement
2824 * @lp_sym: symmetric pause bit in link partner advertisement
2825 * @lp_asm: asymmetric pause bit in link partner advertisement
2827 * Find the intersection between advertised settings and link partner's
2828 * advertised settings
2830 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2831 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2833 if ((!(adv_reg)) || (!(lp_reg))) {
2834 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2835 "Local or link partner's advertised flow control "
2836 "settings are NULL. Local: %x, link partner: %x\n",
2838 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2841 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2843 * Now we need to check if the user selected Rx ONLY
2844 * of pause frames. In this case, we had to advertise
2845 * FULL flow control because we could not advertise RX
2846 * ONLY. Hence, we must now check to see if we need to
2847 * turn OFF the TRANSMISSION of PAUSE frames.
2849 if (hw->fc.requested_mode == ixgbe_fc_full) {
2850 hw->fc.current_mode = ixgbe_fc_full;
2851 DEBUGOUT("Flow Control = FULL.\n");
2853 hw->fc.current_mode = ixgbe_fc_rx_pause;
2854 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2856 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2857 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2858 hw->fc.current_mode = ixgbe_fc_tx_pause;
2859 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2860 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2861 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2862 hw->fc.current_mode = ixgbe_fc_rx_pause;
2863 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2865 hw->fc.current_mode = ixgbe_fc_none;
2866 DEBUGOUT("Flow Control = NONE.\n");
2868 return IXGBE_SUCCESS;
2872 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2873 * @hw: pointer to hardware structure
2875 * Enable flow control according on 1 gig fiber.
2877 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2879 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2880 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2883 * On multispeed fiber at 1g, bail out if
2884 * - link is up but AN did not complete, or if
2885 * - link is up and AN completed but timed out
2888 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2889 if ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0 ||
2890 (linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT)) {
2891 ERROR_REPORT1(IXGBE_ERROR_POLLING,
2892 "Auto-Negotiation did not complete or timed out");
2896 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2897 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2899 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2900 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2901 IXGBE_PCS1GANA_ASM_PAUSE,
2902 IXGBE_PCS1GANA_SYM_PAUSE,
2903 IXGBE_PCS1GANA_ASM_PAUSE);
2910 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2911 * @hw: pointer to hardware structure
2913 * Enable flow control according to IEEE clause 37.
2915 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2917 u32 links2, anlp1_reg, autoc_reg, links;
2918 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2921 * On backplane, bail out if
2922 * - backplane autoneg was not completed, or if
2923 * - we are 82599 and link partner is not AN enabled
2925 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2926 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2927 ERROR_REPORT1(IXGBE_ERROR_POLLING,
2928 "Auto-Negotiation did not complete");
2932 if (hw->mac.type == ixgbe_mac_82599EB) {
2933 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2934 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2935 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2936 "Link partner is not AN enabled");
2941 * Read the 10g AN autoc and LP ability registers and resolve
2942 * local flow control settings accordingly
2944 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2945 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2947 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2948 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2949 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2956 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2957 * @hw: pointer to hardware structure
2959 * Enable flow control according to IEEE clause 37.
2961 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2963 u16 technology_ability_reg = 0;
2964 u16 lp_technology_ability_reg = 0;
2966 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2967 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2968 &technology_ability_reg);
2969 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2970 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2971 &lp_technology_ability_reg);
2973 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2974 (u32)lp_technology_ability_reg,
2975 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2976 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2980 * ixgbe_fc_autoneg - Configure flow control
2981 * @hw: pointer to hardware structure
2983 * Compares our advertised flow control capabilities to those advertised by
2984 * our link partner, and determines the proper flow control mode to use.
2986 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2988 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2989 ixgbe_link_speed speed;
2992 DEBUGFUNC("ixgbe_fc_autoneg");
2995 * AN should have completed when the cable was plugged in.
2996 * Look for reasons to bail out. Bail out if:
2997 * - FC autoneg is disabled, or if
3000 if (hw->fc.disable_fc_autoneg) {
3001 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3002 "Flow control autoneg is disabled");
3006 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3008 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3012 switch (hw->phy.media_type) {
3013 /* Autoneg flow control on fiber adapters */
3014 case ixgbe_media_type_fiber_fixed:
3015 case ixgbe_media_type_fiber:
3016 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3017 ret_val = ixgbe_fc_autoneg_fiber(hw);
3020 /* Autoneg flow control on backplane adapters */
3021 case ixgbe_media_type_backplane:
3022 ret_val = ixgbe_fc_autoneg_backplane(hw);
3025 /* Autoneg flow control on copper adapters */
3026 case ixgbe_media_type_copper:
3027 if (ixgbe_device_supports_autoneg_fc(hw))
3028 ret_val = ixgbe_fc_autoneg_copper(hw);
3036 if (ret_val == IXGBE_SUCCESS) {
3037 hw->fc.fc_was_autonegged = TRUE;
3039 hw->fc.fc_was_autonegged = FALSE;
3040 hw->fc.current_mode = hw->fc.requested_mode;
3045 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3046 * @hw: pointer to hardware structure
3048 * System-wide timeout range is encoded in PCIe Device Control2 register.
3050 * Add 10% to specified maximum and return the number of times to poll for
3051 * completion timeout, in units of 100 microsec. Never return less than
3052 * 800 = 80 millisec.
3054 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3059 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3060 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3063 case IXGBE_PCIDEVCTRL2_65_130ms:
3064 pollcnt = 1300; /* 130 millisec */
3066 case IXGBE_PCIDEVCTRL2_260_520ms:
3067 pollcnt = 5200; /* 520 millisec */
3069 case IXGBE_PCIDEVCTRL2_1_2s:
3070 pollcnt = 20000; /* 2 sec */
3072 case IXGBE_PCIDEVCTRL2_4_8s:
3073 pollcnt = 80000; /* 8 sec */
3075 case IXGBE_PCIDEVCTRL2_17_34s:
3076 pollcnt = 34000; /* 34 sec */
3078 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3079 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3080 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3081 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3083 pollcnt = 800; /* 80 millisec minimum */
3087 /* add 10% to spec maximum */
3088 return (pollcnt * 11) / 10;
3092 * ixgbe_disable_pcie_master - Disable PCI-express master access
3093 * @hw: pointer to hardware structure
3095 * Disables PCI-Express master access and verifies there are no pending
3096 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3097 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3098 * is returned signifying master requests disabled.
3100 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3102 s32 status = IXGBE_SUCCESS;
3105 DEBUGFUNC("ixgbe_disable_pcie_master");
3107 /* Always set this bit to ensure any future transactions are blocked */
3108 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3110 /* Exit if master requests are blocked */
3111 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3114 /* Poll for master request bit to clear */
3115 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3117 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3122 * Two consecutive resets are required via CTRL.RST per datasheet
3123 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
3124 * of this need. The first reset prevents new master requests from
3125 * being issued by our device. We then must wait 1usec or more for any
3126 * remaining completions from the PCIe bus to trickle in, and then reset
3127 * again to clear out any effects they may have had on our device.
3129 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3130 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3133 * Before proceeding, make sure that the PCIe block does not have
3134 * transactions pending.
3136 poll = ixgbe_pcie_timeout_poll(hw);
3137 for (i = 0; i < poll; i++) {
3139 if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
3140 IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3144 ERROR_REPORT1(IXGBE_ERROR_POLLING,
3145 "PCIe transaction pending bit also did not clear.\n");
3146 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3153 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3154 * @hw: pointer to hardware structure
3155 * @mask: Mask to specify which semaphore to acquire
3157 * Acquires the SWFW semaphore through the GSSR register for the specified
3158 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3160 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3164 u32 fwmask = mask << 5;
3168 DEBUGFUNC("ixgbe_acquire_swfw_sync");
3170 for (i = 0; i < timeout; i++) {
3172 * SW NVM semaphore bit is used for access to all
3173 * SW_FW_SYNC bits (not just NVM)
3175 if (ixgbe_get_eeprom_semaphore(hw))
3176 return IXGBE_ERR_SWFW_SYNC;
3178 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3179 if (!(gssr & (fwmask | swmask))) {
3181 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3182 ixgbe_release_eeprom_semaphore(hw);
3183 return IXGBE_SUCCESS;
3185 /* Resource is currently in use by FW or SW */
3186 ixgbe_release_eeprom_semaphore(hw);
3191 /* If time expired clear the bits holding the lock and retry */
3192 if (gssr & (fwmask | swmask))
3193 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3196 return IXGBE_ERR_SWFW_SYNC;
3200 * ixgbe_release_swfw_sync - Release SWFW semaphore
3201 * @hw: pointer to hardware structure
3202 * @mask: Mask to specify which semaphore to release
3204 * Releases the SWFW semaphore through the GSSR register for the specified
3205 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3207 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3212 DEBUGFUNC("ixgbe_release_swfw_sync");
3214 ixgbe_get_eeprom_semaphore(hw);
3216 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3218 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3220 ixgbe_release_eeprom_semaphore(hw);
3224 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3225 * @hw: pointer to hardware structure
3227 * Stops the receive data path and waits for the HW to internally empty
3228 * the Rx security block
3230 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3232 #define IXGBE_MAX_SECRX_POLL 40
3237 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3240 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3241 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3242 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3243 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3244 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3245 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3248 /* Use interrupt-safe sleep just in case */
3252 /* For informational purposes only */
3253 if (i >= IXGBE_MAX_SECRX_POLL)
3254 DEBUGOUT("Rx unit being enabled before security "
3255 "path fully disabled. Continuing with init.\n");
3257 return IXGBE_SUCCESS;
3261 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3262 * @hw: pointer to hardware structure
3264 * Enables the receive data path.
3266 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3270 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3272 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3273 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3274 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3275 IXGBE_WRITE_FLUSH(hw);
3277 return IXGBE_SUCCESS;
3281 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3282 * @hw: pointer to hardware structure
3283 * @regval: register value to write to RXCTRL
3285 * Enables the Rx DMA unit
3287 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3289 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3291 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
3293 return IXGBE_SUCCESS;
3297 * ixgbe_blink_led_start_generic - Blink LED based on index.
3298 * @hw: pointer to hardware structure
3299 * @index: led number to blink
3301 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3303 ixgbe_link_speed speed = 0;
3305 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3306 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3307 s32 ret_val = IXGBE_SUCCESS;
3309 DEBUGFUNC("ixgbe_blink_led_start_generic");
3312 * Link must be up to auto-blink the LEDs;
3313 * Force it if link is down.
3315 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3318 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
3321 bool got_lock = FALSE;
3322 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3323 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3324 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3325 IXGBE_GSSR_MAC_CSR_SM);
3326 if (ret_val != IXGBE_SUCCESS) {
3327 ret_val = IXGBE_ERR_SWFW_SYNC;
3333 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3334 autoc_reg |= IXGBE_AUTOC_FLU;
3335 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3336 IXGBE_WRITE_FLUSH(hw);
3339 hw->mac.ops.release_swfw_sync(hw,
3340 IXGBE_GSSR_MAC_CSR_SM);
3344 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3345 led_reg |= IXGBE_LED_BLINK(index);
3346 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3347 IXGBE_WRITE_FLUSH(hw);
3354 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3355 * @hw: pointer to hardware structure
3356 * @index: led number to stop blinking
3358 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3360 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3361 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3362 s32 ret_val = IXGBE_SUCCESS;
3363 bool got_lock = FALSE;
3365 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3366 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
3369 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3370 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3371 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3372 IXGBE_GSSR_MAC_CSR_SM);
3373 if (ret_val != IXGBE_SUCCESS) {
3374 ret_val = IXGBE_ERR_SWFW_SYNC;
3381 autoc_reg &= ~IXGBE_AUTOC_FLU;
3382 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3383 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3385 if (hw->mac.type == ixgbe_mac_82599EB)
3386 ixgbe_reset_pipeline_82599(hw);
3389 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
3391 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3392 led_reg &= ~IXGBE_LED_BLINK(index);
3393 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3394 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3395 IXGBE_WRITE_FLUSH(hw);
3402 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3403 * @hw: pointer to hardware structure
3404 * @san_mac_offset: SAN MAC address offset
3406 * This function will read the EEPROM location for the SAN MAC address
3407 * pointer, and returns the value at that location. This is used in both
3408 * get and set mac_addr routines.
3410 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3411 u16 *san_mac_offset)
3415 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3418 * First read the EEPROM pointer to see if the MAC addresses are
3421 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3424 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3425 "eeprom at offset %d failed",
3426 IXGBE_SAN_MAC_ADDR_PTR);
3433 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3434 * @hw: pointer to hardware structure
3435 * @san_mac_addr: SAN MAC address
3437 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3438 * per-port, so set_lan_id() must be called before reading the addresses.
3439 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3440 * upon for non-SFP connections, so we must call it here.
3442 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3444 u16 san_mac_data, san_mac_offset;
3448 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3451 * First read the EEPROM pointer to see if the MAC addresses are
3452 * available. If they're not, no point in calling set_lan_id() here.
3454 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3455 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3456 goto san_mac_addr_out;
3458 /* make sure we know which port we need to program */
3459 hw->mac.ops.set_lan_id(hw);
3460 /* apply the port offset to the address offset */
3461 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3462 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3463 for (i = 0; i < 3; i++) {
3464 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3467 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3468 "eeprom read at offset %d failed",
3470 goto san_mac_addr_out;
3472 san_mac_addr[i * 2] = (u8)(san_mac_data);
3473 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3476 return IXGBE_SUCCESS;
3480 * No addresses available in this EEPROM. It's not an
3481 * error though, so just wipe the local address and return.
3483 for (i = 0; i < 6; i++)
3484 san_mac_addr[i] = 0xFF;
3485 return IXGBE_SUCCESS;
3489 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3490 * @hw: pointer to hardware structure
3491 * @san_mac_addr: SAN MAC address
3493 * Write a SAN MAC address to the EEPROM.
3495 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3498 u16 san_mac_data, san_mac_offset;
3501 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3503 /* Look for SAN mac address pointer. If not defined, return */
3504 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3505 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3506 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3508 /* Make sure we know which port we need to write */
3509 hw->mac.ops.set_lan_id(hw);
3510 /* Apply the port offset to the address offset */
3511 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3512 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3514 for (i = 0; i < 3; i++) {
3515 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3516 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3517 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3521 return IXGBE_SUCCESS;
3525 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3526 * @hw: pointer to hardware structure
3528 * Read PCIe configuration space, and get the MSI-X vector count from
3529 * the capabilities table.
3531 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3537 switch (hw->mac.type) {
3538 case ixgbe_mac_82598EB:
3539 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3540 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3542 case ixgbe_mac_82599EB:
3543 case ixgbe_mac_X540:
3544 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3545 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3551 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3552 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3553 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3555 /* MSI-X count is zero-based in HW */
3558 if (msix_count > max_msix_count)
3559 msix_count = max_msix_count;
3565 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3566 * @hw: pointer to hardware structure
3567 * @addr: Address to put into receive address register
3568 * @vmdq: VMDq pool to assign
3570 * Puts an ethernet address into a receive address register, or
3571 * finds the rar that it is aleady in; adds to the pool list
3573 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3575 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3576 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3578 u32 rar_low, rar_high;
3579 u32 addr_low, addr_high;
3581 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3583 /* swap bytes for HW little endian */
3584 addr_low = addr[0] | (addr[1] << 8)
3587 addr_high = addr[4] | (addr[5] << 8);
3590 * Either find the mac_id in rar or find the first empty space.
3591 * rar_highwater points to just after the highest currently used
3592 * rar in order to shorten the search. It grows when we add a new
3595 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3596 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3598 if (((IXGBE_RAH_AV & rar_high) == 0)
3599 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3600 first_empty_rar = rar;
3601 } else if ((rar_high & 0xFFFF) == addr_high) {
3602 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3603 if (rar_low == addr_low)
3604 break; /* found it already in the rars */
3608 if (rar < hw->mac.rar_highwater) {
3609 /* already there so just add to the pool bits */
3610 ixgbe_set_vmdq(hw, rar, vmdq);
3611 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3612 /* stick it into first empty RAR slot we found */
3613 rar = first_empty_rar;
3614 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3615 } else if (rar == hw->mac.rar_highwater) {
3616 /* add it to the top of the list and inc the highwater mark */
3617 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3618 hw->mac.rar_highwater++;
3619 } else if (rar >= hw->mac.num_rar_entries) {
3620 return IXGBE_ERR_INVALID_MAC_ADDR;
3624 * If we found rar[0], make sure the default pool bit (we use pool 0)
3625 * remains cleared to be sure default pool packets will get delivered
3628 ixgbe_clear_vmdq(hw, rar, 0);
3634 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3635 * @hw: pointer to hardware struct
3636 * @rar: receive address register index to disassociate
3637 * @vmdq: VMDq pool index to remove from the rar
3639 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3641 u32 mpsar_lo, mpsar_hi;
3642 u32 rar_entries = hw->mac.num_rar_entries;
3644 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3646 /* Make sure we are using a valid rar index range */
3647 if (rar >= rar_entries) {
3648 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3649 "RAR index %d is out of range.\n", rar);
3650 return IXGBE_ERR_INVALID_ARGUMENT;
3653 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3654 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3656 if (!mpsar_lo && !mpsar_hi)
3659 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3661 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3665 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3668 } else if (vmdq < 32) {
3669 mpsar_lo &= ~(1 << vmdq);
3670 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3672 mpsar_hi &= ~(1 << (vmdq - 32));
3673 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3676 /* was that the last pool using this rar? */
3677 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3678 hw->mac.ops.clear_rar(hw, rar);
3680 return IXGBE_SUCCESS;
3684 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3685 * @hw: pointer to hardware struct
3686 * @rar: receive address register index to associate with a VMDq index
3687 * @vmdq: VMDq pool index
3689 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3692 u32 rar_entries = hw->mac.num_rar_entries;
3694 DEBUGFUNC("ixgbe_set_vmdq_generic");
3696 /* Make sure we are using a valid rar index range */
3697 if (rar >= rar_entries) {
3698 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3699 "RAR index %d is out of range.\n", rar);
3700 return IXGBE_ERR_INVALID_ARGUMENT;
3704 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3706 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3708 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3709 mpsar |= 1 << (vmdq - 32);
3710 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3712 return IXGBE_SUCCESS;
3716 * This function should only be involved in the IOV mode.
3717 * In IOV mode, Default pool is next pool after the number of
3718 * VFs advertized and not 0.
3719 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3721 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3722 * @hw: pointer to hardware struct
3723 * @vmdq: VMDq pool index
3725 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3727 u32 rar = hw->mac.san_mac_rar_index;
3729 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3732 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3733 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3735 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3736 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3739 return IXGBE_SUCCESS;
3743 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3744 * @hw: pointer to hardware structure
3746 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3750 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3751 DEBUGOUT(" Clearing UTA\n");
3753 for (i = 0; i < 128; i++)
3754 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3756 return IXGBE_SUCCESS;
3760 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3761 * @hw: pointer to hardware structure
3762 * @vlan: VLAN id to write to VLAN filter
3764 * return the VLVF index where this VLAN id should be placed
3767 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3770 u32 first_empty_slot = 0;
3773 /* short cut the special case */
3778 * Search for the vlan id in the VLVF entries. Save off the first empty
3779 * slot found along the way
3781 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3782 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3783 if (!bits && !(first_empty_slot))
3784 first_empty_slot = regindex;
3785 else if ((bits & 0x0FFF) == vlan)
3790 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3791 * in the VLVF. Else use the first empty VLVF register for this
3794 if (regindex >= IXGBE_VLVF_ENTRIES) {
3795 if (first_empty_slot)
3796 regindex = first_empty_slot;
3798 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3799 "No space in VLVF.\n");
3800 regindex = IXGBE_ERR_NO_SPACE;
3808 * ixgbe_set_vfta_generic - Set VLAN filter table
3809 * @hw: pointer to hardware structure
3810 * @vlan: VLAN id to write to VLAN filter
3811 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3812 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3814 * Turn on/off specified VLAN in the VLAN filter table.
3816 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3823 s32 ret_val = IXGBE_SUCCESS;
3824 bool vfta_changed = FALSE;
3826 DEBUGFUNC("ixgbe_set_vfta_generic");
3829 return IXGBE_ERR_PARAM;
3832 * this is a 2 part operation - first the VFTA, then the
3833 * VLVF and VLVFB if VT Mode is set
3834 * We don't write the VFTA until we know the VLVF part succeeded.
3838 * The VFTA is a bitstring made up of 128 32-bit registers
3839 * that enable the particular VLAN id, much like the MTA:
3840 * bits[11-5]: which register
3841 * bits[4-0]: which bit in the register
3843 regindex = (vlan >> 5) & 0x7F;
3844 bitindex = vlan & 0x1F;
3845 targetbit = (1 << bitindex);
3846 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3849 if (!(vfta & targetbit)) {
3851 vfta_changed = TRUE;
3854 if ((vfta & targetbit)) {
3856 vfta_changed = TRUE;
3861 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3863 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3865 if (ret_val != IXGBE_SUCCESS)
3869 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3871 return IXGBE_SUCCESS;
3875 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3876 * @hw: pointer to hardware structure
3877 * @vlan: VLAN id to write to VLAN filter
3878 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3879 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3880 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3883 * Turn on/off specified bit in VLVF table.
3885 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3886 bool vlan_on, bool *vfta_changed)
3890 DEBUGFUNC("ixgbe_set_vlvf_generic");
3893 return IXGBE_ERR_PARAM;
3895 /* If VT Mode is set
3897 * make sure the vlan is in VLVF
3898 * set the vind bit in the matching VLVFB
3900 * clear the pool bit and possibly the vind
3902 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3903 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3907 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3912 /* set the pool bit */
3914 bits = IXGBE_READ_REG(hw,
3915 IXGBE_VLVFB(vlvf_index * 2));
3916 bits |= (1 << vind);
3918 IXGBE_VLVFB(vlvf_index * 2),
3921 bits = IXGBE_READ_REG(hw,
3922 IXGBE_VLVFB((vlvf_index * 2) + 1));
3923 bits |= (1 << (vind - 32));
3925 IXGBE_VLVFB((vlvf_index * 2) + 1),
3929 /* clear the pool bit */
3931 bits = IXGBE_READ_REG(hw,
3932 IXGBE_VLVFB(vlvf_index * 2));
3933 bits &= ~(1 << vind);
3935 IXGBE_VLVFB(vlvf_index * 2),
3937 bits |= IXGBE_READ_REG(hw,
3938 IXGBE_VLVFB((vlvf_index * 2) + 1));
3940 bits = IXGBE_READ_REG(hw,
3941 IXGBE_VLVFB((vlvf_index * 2) + 1));
3942 bits &= ~(1 << (vind - 32));
3944 IXGBE_VLVFB((vlvf_index * 2) + 1),
3946 bits |= IXGBE_READ_REG(hw,
3947 IXGBE_VLVFB(vlvf_index * 2));
3952 * If there are still bits set in the VLVFB registers
3953 * for the VLAN ID indicated we need to see if the
3954 * caller is requesting that we clear the VFTA entry bit.
3955 * If the caller has requested that we clear the VFTA
3956 * entry bit but there are still pools/VFs using this VLAN
3957 * ID entry then ignore the request. We're not worried
3958 * about the case where we're turning the VFTA VLAN ID
3959 * entry bit on, only when requested to turn it off as
3960 * there may be multiple pools and/or VFs using the
3961 * VLAN ID entry. In that case we cannot clear the
3962 * VFTA bit until all pools/VFs using that VLAN ID have also
3963 * been cleared. This will be indicated by "bits" being
3967 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3968 (IXGBE_VLVF_VIEN | vlan));
3969 if ((!vlan_on) && (vfta_changed != NULL)) {
3970 /* someone wants to clear the vfta entry
3971 * but some pools/VFs are still using it.
3973 *vfta_changed = FALSE;
3976 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3979 return IXGBE_SUCCESS;
3983 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3984 * @hw: pointer to hardware structure
3986 * Clears the VLAN filer table, and the VMDq index associated with the filter
3988 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3992 DEBUGFUNC("ixgbe_clear_vfta_generic");
3994 for (offset = 0; offset < hw->mac.vft_size; offset++)
3995 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3997 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3998 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3999 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4000 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4003 return IXGBE_SUCCESS;
4007 * ixgbe_check_mac_link_generic - Determine link and speed status
4008 * @hw: pointer to hardware structure
4009 * @speed: pointer to link speed
4010 * @link_up: TRUE when link is up
4011 * @link_up_wait_to_complete: bool used to wait for link up or not
4013 * Reads the links register to determine if link is up and the current speed
4015 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4016 bool *link_up, bool link_up_wait_to_complete)
4018 u32 links_reg, links_orig;
4021 DEBUGFUNC("ixgbe_check_mac_link_generic");
4023 /* clear the old state */
4024 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4026 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4028 if (links_orig != links_reg) {
4029 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4030 links_orig, links_reg);
4033 if (link_up_wait_to_complete) {
4034 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
4035 if (links_reg & IXGBE_LINKS_UP) {
4042 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4045 if (links_reg & IXGBE_LINKS_UP)
4051 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4052 IXGBE_LINKS_SPEED_10G_82599)
4053 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4054 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4055 IXGBE_LINKS_SPEED_1G_82599)
4056 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4057 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4058 IXGBE_LINKS_SPEED_100_82599)
4059 *speed = IXGBE_LINK_SPEED_100_FULL;
4061 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4063 return IXGBE_SUCCESS;
4067 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4069 * @hw: pointer to hardware structure
4070 * @wwnn_prefix: the alternative WWNN prefix
4071 * @wwpn_prefix: the alternative WWPN prefix
4073 * This function will read the EEPROM from the alternative SAN MAC address
4074 * block to check the support for the alternative WWNN/WWPN prefix support.
4076 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4080 u16 alt_san_mac_blk_offset;
4082 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4084 /* clear output first */
4085 *wwnn_prefix = 0xFFFF;
4086 *wwpn_prefix = 0xFFFF;
4088 /* check if alternative SAN MAC is supported */
4089 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4090 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4091 goto wwn_prefix_err;
4093 if ((alt_san_mac_blk_offset == 0) ||
4094 (alt_san_mac_blk_offset == 0xFFFF))
4095 goto wwn_prefix_out;
4097 /* check capability in alternative san mac address block */
4098 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4099 if (hw->eeprom.ops.read(hw, offset, &caps))
4100 goto wwn_prefix_err;
4101 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4102 goto wwn_prefix_out;
4104 /* get the corresponding prefix for WWNN/WWPN */
4105 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4106 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4107 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4108 "eeprom read at offset %d failed", offset);
4111 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4112 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4113 goto wwn_prefix_err;
4116 return IXGBE_SUCCESS;
4119 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4120 "eeprom read at offset %d failed", offset);
4121 return IXGBE_SUCCESS;
4125 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4126 * @hw: pointer to hardware structure
4127 * @bs: the fcoe boot status
4129 * This function will read the FCOE boot status from the iSCSI FCOE block
4131 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4133 u16 offset, caps, flags;
4136 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4138 /* clear output first */
4139 *bs = ixgbe_fcoe_bootstatus_unavailable;
4141 /* check if FCOE IBA block is present */
4142 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4143 status = hw->eeprom.ops.read(hw, offset, &caps);
4144 if (status != IXGBE_SUCCESS)
4147 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4150 /* check if iSCSI FCOE block is populated */
4151 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4152 if (status != IXGBE_SUCCESS)
4155 if ((offset == 0) || (offset == 0xFFFF))
4158 /* read fcoe flags in iSCSI FCOE block */
4159 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4160 status = hw->eeprom.ops.read(hw, offset, &flags);
4161 if (status != IXGBE_SUCCESS)
4164 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4165 *bs = ixgbe_fcoe_bootstatus_enabled;
4167 *bs = ixgbe_fcoe_bootstatus_disabled;
4174 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4175 * @hw: pointer to hardware structure
4176 * @enable: enable or disable switch for anti-spoofing
4177 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
4180 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
4183 int pf_target_reg = pf >> 3;
4184 int pf_target_shift = pf % 8;
4187 if (hw->mac.type == ixgbe_mac_82598EB)
4191 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
4194 * PFVFSPOOF register array is size 8 with 8 bits assigned to
4195 * MAC anti-spoof enables in each register array element.
4197 for (j = 0; j < pf_target_reg; j++)
4198 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4201 * The PF should be allowed to spoof so that it can support
4202 * emulation mode NICs. Do not set the bits assigned to the PF
4204 pfvfspoof &= (1 << pf_target_shift) - 1;
4205 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4208 * Remaining pools belong to the PF so they do not need to have
4209 * anti-spoofing enabled.
4211 for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
4212 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
4216 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4217 * @hw: pointer to hardware structure
4218 * @enable: enable or disable switch for VLAN anti-spoofing
4219 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4222 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4224 int vf_target_reg = vf >> 3;
4225 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4228 if (hw->mac.type == ixgbe_mac_82598EB)
4231 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4233 pfvfspoof |= (1 << vf_target_shift);
4235 pfvfspoof &= ~(1 << vf_target_shift);
4236 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4240 * ixgbe_get_device_caps_generic - Get additional device capabilities
4241 * @hw: pointer to hardware structure
4242 * @device_caps: the EEPROM word with the extra device capabilities
4244 * This function will read the EEPROM location for the device capabilities,
4245 * and return the word through device_caps.
4247 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4249 DEBUGFUNC("ixgbe_get_device_caps_generic");
4251 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4253 return IXGBE_SUCCESS;
4257 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4258 * @hw: pointer to hardware structure
4261 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4266 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4268 /* Enable relaxed ordering */
4269 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4270 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4271 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4272 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4275 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4276 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4277 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4278 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4279 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4285 * ixgbe_calculate_checksum - Calculate checksum for buffer
4286 * @buffer: pointer to EEPROM
4287 * @length: size of EEPROM to calculate a checksum for
4288 * Calculates the checksum for some buffer on a specified length. The
4289 * checksum calculated is returned.
4291 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4296 DEBUGFUNC("ixgbe_calculate_checksum");
4301 for (i = 0; i < length; i++)
4304 return (u8) (0 - sum);
4308 * ixgbe_host_interface_command - Issue command to manageability block
4309 * @hw: pointer to the HW structure
4310 * @buffer: contains the command to write and where the return status will
4312 * @length: length of buffer, must be multiple of 4 bytes
4314 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4315 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
4317 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4321 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4322 u8 buf_len, dword_len;
4324 s32 ret_val = IXGBE_SUCCESS;
4326 DEBUGFUNC("ixgbe_host_interface_command");
4328 if (length == 0 || length & 0x3 ||
4329 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4330 DEBUGOUT("Buffer length failure.\n");
4331 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4335 /* Check that the host interface is enabled. */
4336 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4337 if ((hicr & IXGBE_HICR_EN) == 0) {
4338 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4339 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4343 /* Calculate length in DWORDs */
4344 dword_len = length >> 2;
4347 * The device driver writes the relevant command block
4348 * into the ram area.
4350 for (i = 0; i < dword_len; i++)
4351 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4352 i, IXGBE_CPU_TO_LE32(buffer[i]));
4354 /* Setting this bit tells the ARC that a new command is pending. */
4355 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4357 for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
4358 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4359 if (!(hicr & IXGBE_HICR_C))
4364 /* Check command successful completion. */
4365 if (i == IXGBE_HI_COMMAND_TIMEOUT ||
4366 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
4367 DEBUGOUT("Command has failed with no status valid.\n");
4368 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4372 /* Calculate length in DWORDs */
4373 dword_len = hdr_size >> 2;
4375 /* first pull in the header so we know the buffer length */
4376 for (bi = 0; bi < dword_len; bi++) {
4377 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4378 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4381 /* If there is any thing in data position pull it in */
4382 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4386 if (length < (buf_len + hdr_size)) {
4387 DEBUGOUT("Buffer not large enough for reply message.\n");
4388 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4392 /* Calculate length in DWORDs, add 3 for odd lengths */
4393 dword_len = (buf_len + 3) >> 2;
4395 /* Pull in the rest of the buffer (bi is where we left off)*/
4396 for (; bi <= dword_len; bi++) {
4397 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4398 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4406 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4407 * @hw: pointer to the HW structure
4408 * @maj: driver version major number
4409 * @min: driver version minor number
4410 * @build: driver version build number
4411 * @sub: driver version sub build number
4413 * Sends driver version number to firmware through the manageability
4414 * block. On success return IXGBE_SUCCESS
4415 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4416 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4418 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4421 struct ixgbe_hic_drv_info fw_cmd;
4423 s32 ret_val = IXGBE_SUCCESS;
4425 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4427 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4429 ret_val = IXGBE_ERR_SWFW_SYNC;
4433 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4434 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4435 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4436 fw_cmd.port_num = (u8)hw->bus.func;
4437 fw_cmd.ver_maj = maj;
4438 fw_cmd.ver_min = min;
4439 fw_cmd.ver_build = build;
4440 fw_cmd.ver_sub = sub;
4441 fw_cmd.hdr.checksum = 0;
4442 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4443 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4447 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4448 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4450 if (ret_val != IXGBE_SUCCESS)
4453 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4454 FW_CEM_RESP_STATUS_SUCCESS)
4455 ret_val = IXGBE_SUCCESS;
4457 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4462 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4468 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4469 * @hw: pointer to hardware structure
4470 * @num_pb: number of packet buffers to allocate
4471 * @headroom: reserve n KB of headroom
4472 * @strategy: packet buffer allocation strategy
4474 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4477 u32 pbsize = hw->mac.rx_pb_size;
4479 u32 rxpktsize, txpktsize, txpbthresh;
4481 /* Reserve headroom */
4487 /* Divide remaining packet buffer space amongst the number of packet
4488 * buffers requested using supplied strategy.
4491 case PBA_STRATEGY_WEIGHTED:
4492 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4493 * buffer with 5/8 of the packet buffer space.
4495 rxpktsize = (pbsize * 5) / (num_pb * 4);
4496 pbsize -= rxpktsize * (num_pb / 2);
4497 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4498 for (; i < (num_pb / 2); i++)
4499 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4500 /* Fall through to configure remaining packet buffers */
4501 case PBA_STRATEGY_EQUAL:
4502 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4503 for (; i < num_pb; i++)
4504 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4510 /* Only support an equally distributed Tx packet buffer strategy. */
4511 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4512 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4513 for (i = 0; i < num_pb; i++) {
4514 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4515 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4518 /* Clear unused TCs, if any, to zero buffer size*/
4519 for (; i < IXGBE_MAX_PB; i++) {
4520 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4521 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4522 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4527 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4528 * @hw: pointer to the hardware structure
4530 * The 82599 and x540 MACs can experience issues if TX work is still pending
4531 * when a reset occurs. This function prevents this by flushing the PCIe
4532 * buffers on the system.
4534 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4536 u32 gcr_ext, hlreg0;
4539 * If double reset is not requested then all transactions should
4540 * already be clear and as such there is no work to do
4542 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4546 * Set loopback enable to prevent any transmits from being sent
4547 * should the link come up. This assumes that the RXCTRL.RXEN bit
4548 * has already been cleared.
4550 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4551 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4553 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4554 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4555 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4556 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4558 /* Flush all writes and allow 20usec for all transactions to clear */
4559 IXGBE_WRITE_FLUSH(hw);
4562 /* restore previous register values */
4563 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4564 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4569 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4570 * @hw: pointer to hardware structure
4571 * @map: pointer to u8 arr for returning map
4573 * Read the rtrup2tc HW register and resolve its content into map
4575 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4579 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4580 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4581 map[i] = IXGBE_RTRUP2TC_UP_MASK &
4582 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));