1 /******************************************************************************
3 Copyright (c) 2001-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 /* 82562G 10/100 Network Connection
36 * 82562G-2 10/100 Network Connection
37 * 82562GT 10/100 Network Connection
38 * 82562GT-2 10/100 Network Connection
39 * 82562V 10/100 Network Connection
40 * 82562V-2 10/100 Network Connection
41 * 82566DC-2 Gigabit Network Connection
42 * 82566DC Gigabit Network Connection
43 * 82566DM-2 Gigabit Network Connection
44 * 82566DM Gigabit Network Connection
45 * 82566MC Gigabit Network Connection
46 * 82566MM Gigabit Network Connection
47 * 82567LM Gigabit Network Connection
48 * 82567LF Gigabit Network Connection
49 * 82567V Gigabit Network Connection
50 * 82567LM-2 Gigabit Network Connection
51 * 82567LF-2 Gigabit Network Connection
52 * 82567V-2 Gigabit Network Connection
53 * 82567LF-3 Gigabit Network Connection
54 * 82567LM-3 Gigabit Network Connection
55 * 82567LM-4 Gigabit Network Connection
56 * 82577LM Gigabit Network Connection
57 * 82577LC Gigabit Network Connection
58 * 82578DM Gigabit Network Connection
59 * 82578DC Gigabit Network Connection
60 * 82579LM Gigabit Network Connection
61 * 82579V Gigabit Network Connection
62 * Ethernet Connection I217-LM
63 * Ethernet Connection I217-V
64 * Ethernet Connection I218-V
65 * Ethernet Connection I218-LM
66 * Ethernet Connection (2) I218-LM
67 * Ethernet Connection (2) I218-V
68 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */
72 #include "e1000_api.h"
74 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
86 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
87 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
88 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
89 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
91 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
93 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94 u16 words, u16 *data);
95 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset,
96 u16 words, u16 *data);
97 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98 u16 words, u16 *data);
99 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
102 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
105 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
108 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
109 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
110 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
111 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
112 u16 *speed, u16 *duplex);
113 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
115 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
116 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
118 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
120 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
126 u32 offset, u8 *data);
127 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
129 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
131 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
132 u32 offset, u16 *data);
133 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
134 u32 offset, u32 *data);
135 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
136 u32 offset, u8 byte);
137 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
138 u32 offset, u32 dword);
139 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
140 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
141 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
142 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
143 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
144 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
145 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
/* NOTE(review): relies on the compiler allocating bit-fields LSB-first -
 * true for the compilers this driver targets, but worth confirming when
 * porting.
 */
union ich8_hws_flash_status {
	u16 flcdone:1; /* bit 0 Flash Cycle Done */
	u16 flcerr:1; /* bit 1 Flash Cycle Error */
	u16 dael:1; /* bit 2 Direct Access error Log */
	u16 berasesz:2; /* bit 4:3 Sector Erase Size */
	u16 flcinprog:1; /* bit 5 flash cycle in Progress */
	u16 reserved1:2; /* bit 7:6 Reserved */
	u16 reserved2:6; /* bit 13:8 Reserved */
	u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
	u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1; /* 0 Flash Cycle Go */
		u16 flcycle:2; /* 2:1 Flash Cycle */
		u16 reserved:5; /* 7:3 Reserved */
		u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
		/* NOTE(review): field is named flockdn but the comment marks
		 * bits 15:10 Reserved - confirm against the register spec.
		 */
		u16 flockdn:6; /* 15:10 Reserved */
/* ICH Flash Region Access Permissions */
/* Bitfield view of the flash region access permissions register. */
union ich8_hws_flash_regacc {
	u32 grra:8; /* 0:7 GbE region Read Access */
	u32 grwa:8; /* 8:15 GbE region Write Access */
	u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
	u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
	/* Retry the ID read: a failed read or all-ones value means the PHY
	 * is not responding on the current interconnect mode.
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
		/* PHY ID1 holds the upper 16 bits of the composite PHY id */
		phy_id = (u32)(phy_reg << 16);
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
	/* If the PHY ID is already known (resume path), just compare */
	if (hw->phy.id == phy_id)
		/* Revision is the low bits of PHY ID2 */
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		/* Must drop the semaphore while switching MDIO mode;
		 * reacquire before returning to the caller's locked context.
		 */
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	if (hw->mac.type == e1000_pch_lpt ||
	    hw->mac.type == e1000_pch_spt) {
		/* Unforce SMBus mode in PHY */
		hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
		hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
		/* Unforce SMBus mode in MAC */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
	/* Toggle LANPHYPC Value bit: drive the pin low under software
	 * override...
	 */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	/* ...then release the override so the pin returns to hw control */
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	if (hw->mac.type < e1000_pch_lpt) {
	/* Busy-wait (bounded by count) for the LANPHYPC-done indication */
	} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
		   E1000_CTRL_EXT_LPCD) && count--);
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  (and presumably also on resume - confirm against callers).
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, TRUE);
	ret_val = hw->phy.ops.acquire(hw);
		DEBUGOUT("Failed to initialize PHY flow\n");
	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
		if (e1000_phy_is_accessible_pchlan(hw))
		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		if (e1000_phy_is_accessible_pchlan(hw))
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
		/* ME firmware can veto the LANPHYPC power-cycle */
		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
			if (e1000_phy_is_accessible_pchlan(hw))
			/* PHY still unreachable after all recovery attempts */
			ret_val = -E1000_ERR_PHY;
	hw->phy.ops.release(hw);
	/* Check to see if able to reset PHY.  Print error if not */
	if (hw->phy.ops.check_reset_block(hw)) {
		ERROR_REPORT("Reset blocked by ME\n");
	/* Reset the PHY before any access to it.  Doing so, ensures
	 * that the PHY is in a known good state before we read/write
	 * PHY registers.  The generic reset is sufficient here,
	 * because we haven't determined the PHY type yet.
	 */
	ret_val = e1000_phy_hw_reset_generic(hw);
	/* On a successful reset, possibly need to wait for the PHY
	 * to quiesce to an accessible state before returning control
	 * to the calling function.  If the PHY does not quiesce, then
	 * return E1000E_BLK_PHY_RESET, as this is the condition that
	 */
	ret_val = hw->phy.ops.check_reset_block(hw);
		ERROR_REPORT("ME blocked access to PHY after reset\n");
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers
 *  for the PCH family (82577/82578/82579/I217/I218 PHYs).
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
	struct e1000_phy_info *phy = &hw->phy;
	DEBUGFUNC("e1000_init_phy_params_pchlan");
	phy->reset_delay_us = 100;
	/* PCH-family PHYs are accessed through the HV register interface */
	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	/* Same handler covers both D0 and D3 LPLU on PCH */
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->id = e1000_phy_unknown;
	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	/* The workarounds may already have identified the PHY */
	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
			ret_val = e1000_get_phy_id(hw);
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			ret_val = e1000_get_phy_id(hw);
	phy->type = e1000_get_phy_type_from_id(phy->id);
	/* Bind the type-specific PHY operations */
	case e1000_phy_82577:
	case e1000_phy_82579:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
		    e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		/* Unrecognized PHY type */
		ret_val = -E1000_ERR_PHY;
/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers
 *  for the ICH8/9/10 family (IGP3, IFE and BM PHYs).
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
	struct e1000_phy_info *phy = &hw->phy;
	DEBUGFUNC("e1000_init_phy_params_ich8lan");
	phy->reset_delay_us = 100;
	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg = e1000_read_phy_reg_igp;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg = e1000_write_phy_reg_igp;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
	/* Retry reading the PHY ID until a known PHY type is recognized */
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
		ret_val = e1000_get_phy_id(hw);
	/* Bind the type-specific PHY operations by PHY ID */
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
	case IFE_PLUS_E_PHY_ID:
		/* IFE is a 10/100-only PHY */
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		/* Unrecognized PHY ID */
		return -E1000_ERR_PHY;
	return E1000_SUCCESS;
/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.  The NVM lives in SPI flash, shadowed in RAM.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
	/* Can't read flash registers if the register set isn't mapped. */
	nvm->type = e1000_nvm_flash_sw;
	/* XXX turn flash_address into flash_reg_off or something more appropriate */
#define E1000_FLASH_BASE_ADDR 0xE000 /* offset of NVM access regs */
#define NVM_SIZE_MULTIPLIER 4096 /* NVM size is in 4KB sector units */
	if (hw->mac.type == e1000_pch_spt) {
		/* In SPT the flash is in the GbE flash region of the
		 * main hw map.  GFPREG does not exist.  Take NVM size from
		 * the STRAP register.
		 */
		nvm->flash_base_addr = 0;
		nvm_size = (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
			   * NVM_SIZE_MULTIPLIER;
		/* Two banks share the NVM region */
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
		if (!hw->flash_address) {
			DEBUGOUT("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	nvm->word_size = E1000_SHADOW_RAM_WORDS;
	/* Clear shadow ram: erased flash reads as all-ones (0xFFFF) */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
	nvm->ops.release = e1000_release_nvm_ich8lan;
	/* SPT uses its own read/update paths (no GFPREG) */
	if (hw->mac.type == e1000_pch_spt) {
		nvm->ops.read = e1000_read_nvm_spt;
		nvm->ops.update = e1000_update_nvm_checksum_spt;
		nvm->ops.read = e1000_read_nvm_ich8lan;
		nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write = e1000_write_nvm_ich8lan;
	return E1000_SUCCESS;
/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
	struct e1000_mac_info *mac = &hw->mac;
	DEBUGFUNC("e1000_init_mac_params_ich8lan");
	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;
	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	/* ICH8 has one fewer usable receive address register */
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register present */
	mac->has_fwsm = TRUE;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = FALSE;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = TRUE;
	/* Function pointers */
	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
	/* LED and other operations */
	/* check management mode */
	mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
	/* ID LED init */
	mac->ops.id_led_init = e1000_id_led_init_generic;
	/* blink LED */
	mac->ops.blink_led = e1000_blink_led_generic;
	/* setup LED */
	mac->ops.setup_led = e1000_setup_led_generic;
	/* cleanup LED */
	mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_ich8lan;
	mac->ops.led_off = e1000_led_off_ich8lan;
	/* PCH2 overrides: more RARs and its own rar_set */
	mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
	mac->ops.rar_set = e1000_rar_set_pch2lan;
	/* multicast address update for pch2 */
	mac->ops.update_mc_addr_list =
	    e1000_update_mc_addr_list_pch2lan;
	/* save PCH revision_id */
	e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
	/* NOTE(review): "&=" also clobbers pci_cfg as a side effect;
	 * a plain "&" would suffice here.
	 */
	hw->revision_id = (u8)(pci_cfg &= 0x000F);
	/* check management mode */
	mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
	/* ID LED init */
	mac->ops.id_led_init = e1000_id_led_init_pchlan;
	/* setup LED */
	mac->ops.setup_led = e1000_setup_led_pchlan;
	/* cleanup LED */
	mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_pchlan;
	mac->ops.led_off = e1000_led_off_pchlan;
	/* LPT/SPT overrides: more RARs, LPT-specific link setup and OBFF */
	if (mac->type == e1000_pch_lpt ||
	    mac->type == e1000_pch_spt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
	return E1000_SUCCESS;
/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
	DEBUGFUNC("__e1000_access_emi_reg_locked");
	/* Program the EMI address, then transfer the data word through the
	 * EMI data register in the requested direction.
	 */
	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: pointer to location to store the value read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
	DEBUGFUNC("e1000_read_emi_reg_locked");
	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
874 * e1000_write_emi_reg_locked - Write Extended Management Interface register
875 * @hw: pointer to the HW structure
876 * @addr: EMI address to program
877 * @data: value to be written to the EMI address
879 * Assumes the SW/FW/HW Semaphore is already acquired.
881 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
883 DEBUGFUNC("e1000_read_emi_reg_locked");
885 return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
	DEBUGFUNC("e1000_set_eee_pchlan");
	/* Select the EMI register addresses matching the PHY generation */
	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		/* PHY types without EEE support: nothing to do */
		return E1000_SUCCESS;
	ret_val = hw->phy.ops.acquire(hw);
	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			/* EEE is not supported in 100Half, so ignore
			 * partner's EEE in 100 ability if full-duplex
			 * is not advertised.
			 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
	/* 82579-only: keep the PLL running at 100Mbps LPI */
	if (hw->phy.type == e1000_phy_82579) {
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
	hw->phy.ops.release(hw);
/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	if (link && (status & E1000_STATUS_SPEED_1000)) {
		/* At 1G: disable K1 while asserting the PLL clock request */
		ret_val = hw->phy.ops.acquire(hw);
		e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
		e1000_write_kmrn_reg_locked(hw,
					    E1000_KMRNCTRLSTA_K1_CONFIG,
					    ~E1000_KMRNCTRLSTA_K1_ENABLE);
		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
		/* Restore the saved K1 configuration */
		e1000_write_kmrn_reg_locked(hw,
					    E1000_KMRNCTRLSTA_K1_CONFIG,
		hw->phy.ops.release(hw);
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
		/* 100Full needs no inband timeout adjustment */
		if (!link || ((status & E1000_STATUS_SPEED_100) &&
			      (status & E1000_STATUS_FD)))
			goto update_fextnvm6;
		/* NOTE(review): "&reg;" below was mangled into "®" by a bad
		 * encoding pass - it must be the address-of operator on the
		 * local "reg"; fix the source encoding.
		 */
		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, ®);
		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
				I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1081 static u64 e1000_ltr2ns(u16 ltr)
1085 /* Determine the latency in nsec based on the LTR value & scale */
1086 value = ltr & E1000_LTRV_VALUE_MASK;
1087 scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1089 return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 *  message to the PMC.
 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 *  high water mark.
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_CONFIG on any validation failure.
 *  NOTE(review): this extract omits interleaved lines (braces, some
 *  declarations such as 'rxa', 'value' and 'obff_hwm'); only the visible
 *  statements are annotated.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
        /* Pre-build the LTRV request: when link is up, set the snoop and
         * no-snoop requirement bits; always set SEND to push the message.
         */
        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
                  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
        u16 lat_enc = 0;        /* latency encoded */
        DEBUGFUNC("e1000_platform_pm_pch_lpt");
        u16 speed, duplex, scale = 0;
        u16 max_snoop, max_nosnoop;
        u16 max_ltr_enc;        /* max LTR latency encoded */
        s64 lat_ns;     /* latency (ns) */
        /* Cannot size the latency budget without the max frame size */
        if (!hw->mac.max_frame_size) {
                DEBUGOUT("max_frame_size not set.\n");
                return -E1000_ERR_CONFIG;
        hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
                DEBUGOUT("Speed not set.\n");
                return -E1000_ERR_CONFIG;
        /* Rx Packet Buffer Allocation size (KB) */
        rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
        /* Determine the maximum latency tolerated by the device.
         *
         * Per the PCIe spec, the tolerated latencies are encoded as
         * a 3-bit encoded scale (only 0-5 are valid) multiplied by
         * a 10-bit value (0-1023) to provide a range from 1 ns to
         * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
         * 1=2^5ns, 2=2^10ns,...5=2^25ns.
         */
        /* Time to drain the Rx buffer (minus two in-flight frames),
         * in bits, converted toward nanoseconds below.
         */
        lat_ns = ((s64)rxa * 1024 -
                  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
        /* Normalize 'value' into the 10-bit field, bumping the scale by
         * one power-of-32 step each iteration.
         */
        while (value > E1000_LTRV_VALUE_MASK) {
                value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
        if (scale > E1000_LTRV_SCALE_MAX) {
                DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
                return -E1000_ERR_CONFIG;
        lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
        /* Determine the maximum latency tolerated by the platform */
        e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
        e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
        max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
        /* Clamp the device request to the platform's advertised maximum.
         * NOTE(review): this compares the *encoded* values directly —
         * confirm against the full source whether decoded-ns comparison
         * is intended.
         */
        if (lat_enc > max_ltr_enc) {
                lat_enc = max_ltr_enc;
                lat_ns = e1000_ltr2ns(max_ltr_enc);
        /* Convert the latency into an OBFF high water mark: how much of
         * the Rx buffer (KB) is consumed during 'lat_ns' at link speed.
         */
        lat_ns *= speed * 1000;
        lat_ns /= 1000000000;
        obff_hwm = (s32)(rxa - lat_ns);
        if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
                DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
                return -E1000_ERR_CONFIG;
        /* Set Snoop and No-Snoop latencies the same */
        reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
        E1000_WRITE_REG(hw, E1000_LTRV, reg);
        /* Set OBFF high water mark */
        reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
        E1000_WRITE_REG(hw, E1000_SVT, reg);
        /* Enable OBFF */
        reg = E1000_READ_REG(hw, E1000_SVCR);
        reg |= E1000_SVCR_OFF_EN;
        /* Always unblock interrupts to the CPU even when the system is
         * in OBFF mode. This ensures that small round-robin traffic
         * (like ping) does not get dropped or experience long latency.
         */
        reg |= E1000_SVCR_OFF_MASKINT;
        E1000_WRITE_REG(hw, E1000_SVCR, reg);
        return E1000_SUCCESS;
/**
 *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 *  @hw: pointer to the HW structure
 *  @itr: interrupt throttling rate
 *
 *  Configure OBFF with the updated interrupt rate.
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_CONFIG if the derived timer value
 *  is out of range for the SVCR OFF_TIMER field.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
        DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
        /* Convert ITR value into microseconds for OBFF timer */
        timer = itr & E1000_ITR_MASK;
        timer = (timer * E1000_ITR_MULT) / 1000;
        /* NOTE(review): the declaration of 'timer' is not visible in this
         * extract; the (timer < 0) test below is only meaningful if it is
         * declared as a signed type — confirm against the full source.
         */
        if ((timer < 0) || (timer > E1000_ITR_MASK)) {
                DEBUGOUT1("Invalid OBFF timer %d\n", timer);
                return -E1000_ERR_CONFIG;
        /* Read-modify-write only the OFF_TIMER field of SVCR */
        svcr = E1000_READ_REG(hw, E1000_SVCR);
        svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
        svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
        E1000_WRITE_REG(hw, E1000_SVCR, svcr);
        return E1000_SUCCESS;
/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  When link is down, configure ULP mode to significantly reduce the power
 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 *  ME firmware to start the ULP configuration.  If not on an ME enabled
 *  system, configure the ULP mode by software.
 *
 *  NOTE(review): this extract omits interleaved lines (braces, declarations
 *  of 'mac_reg'/'phy_reg', error checks, delays); only the visible
 *  statements are annotated.
 **/
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
        s32 ret_val = E1000_SUCCESS;
        /* ULP only applies to LPT-LP and later parts; skip on the LM/V
         * device IDs listed or when ULP is already on.
         */
        if ((hw->mac.type < e1000_pch_lpt) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
        /* ME-enabled system: firmware performs the ULP configuration */
        if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
                /* Request ME configure ULP mode in the PHY */
                mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
                E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
        /* Poll up to 5 seconds for Cable Disconnected indication */
        while (!(E1000_READ_REG(hw, E1000_FEXT) &
                 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
                /* Bail if link is re-acquired */
                if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
                        return -E1000_ERR_PHY;
        DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
                  (E1000_READ_REG(hw, E1000_FEXT) &
                   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
        ret_val = hw->phy.ops.acquire(hw);
        /* Force SMBus mode in PHY */
        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
        phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
        /* Force SMBus mode in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
        /* Set Inband ULP Exit, Reset to SMBus mode and
         * Disable SMBus Release on PERST# in PHY
         */
        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
        phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
                    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
        /* Wake on link-change: keep WoL host notification through ULP */
        if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
                phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
                phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
                phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
        /* Set Disable SMBus Release on PERST# in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
        mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
        E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
        /* Commit ULP changes in PHY by starting auto ULP configuration */
        phy_reg |= I218_ULP_CONFIG1_START;
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
        hw->phy.ops.release(hw);
        DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
        /* Record that ULP is now active for this device */
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
/**
 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @force: boolean indicating whether or not to force disabling ULP
 *
 *  Un-configure ULP mode when link is up, the system is transitioned from
 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 *  system, poll for an indication from ME that ULP has been un-configured.
 *  If not on an ME enabled system, un-configure the ULP mode by software.
 *
 *  During nominal operation, this function is called when link is acquired
 *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
 *  the driver or during Sx->S0 transitions, this is called with force=TRUE
 *  to forcibly disable ULP.
 *
 *  NOTE(review): this extract omits interleaved lines (braces, declarations
 *  of 'mac_reg'/'phy_reg'/'i', error checks, delays); only the visible
 *  statements are annotated.
 **/
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
        s32 ret_val = E1000_SUCCESS;
        /* Same device gate as the enable path, but skip when ULP is
         * already off.
         */
        if ((hw->mac.type < e1000_pch_lpt) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
        /* ME-enabled system: ask firmware to undo the ULP configuration */
        if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
                /* Request ME un-configure ULP mode in the PHY */
                mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                mac_reg &= ~E1000_H2ME_ULP;
                mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
                E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
                /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
                while (E1000_READ_REG(hw, E1000_FWSM) &
                       E1000_FWSM_ULP_CFG_DONE) {
                        ret_val = -E1000_ERR_PHY;
                DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
                /* Drop the ENFORCE_SETTINGS request now that ME is done */
                mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
                E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
                /* Clear H2ME.ULP after ME ULP configuration */
                mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                mac_reg &= ~E1000_H2ME_ULP;
                E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
        /* Software (non-ME) un-configuration path */
        ret_val = hw->phy.ops.acquire(hw);
        /* Toggle LANPHYPC Value bit */
        e1000_toggle_lanphypc_pch_lpt(hw);
        /* Unforce SMBus mode in PHY */
        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
        /* The MAC might be in PCIe mode, so temporarily force to
         * SMBus mode in order to access the PHY.
         */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
        /* Retry the CV_SMB_CTRL read now that SMBus mode is forced */
        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
        /* Unforce SMBus mode in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
        /* When ULP mode was previously entered, K1 was disabled by the
         * hardware.  Re-Enable K1 in the PHY when exiting ULP.
         */
        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
        phy_reg |= HV_PM_CTRL_K1_ENABLE;
        e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
        /* Clear ULP enabled configuration */
        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
        phy_reg &= ~(I218_ULP_CONFIG1_IND |
                     I218_ULP_CONFIG1_STICKY_ULP |
                     I218_ULP_CONFIG1_RESET_TO_SMBUS |
                     I218_ULP_CONFIG1_WOL_HOST |
                     I218_ULP_CONFIG1_INBAND_EXIT |
                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
        /* Commit ULP changes by starting auto ULP configuration */
        phy_reg |= I218_ULP_CONFIG1_START;
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
        /* Clear Disable SMBus Release on PERST# in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
        mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
        E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
        hw->phy.ops.release(hw);
        /* A full PHY reset completes the exit from ULP */
        hw->phy.ops.reset(hw);
        DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
        /* Record that ULP is now off for this device */
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 *
 *  Also applies several hardware-specific link-up workarounds (IPG/Rx
 *  latency tuning, beacon duration, K1, LTR/OBFF) for the PCH families
 *  before configuring collision distance and flow control.
 *
 *  NOTE(review): this extract omits interleaved lines (braces, declarations
 *  of 'reg'/'mac_reg'/'data'/'ptr_gap'/'emi_addr', error checks, case
 *  labels); only the visible statements are annotated.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
        struct e1000_mac_info *mac = &hw->mac;
        DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
        /* We only want to go out to the PHY registers to see if Auto-Neg
         * has completed and/or if our link status has changed.  The
         * get_link_status flag is set upon receiving a Link Status
         * Change or Rx Sequence Error interrupt.
         */
        if (!mac->get_link_status)
                return E1000_SUCCESS;
        /* First we want to see if the MII Status Register reports
         * link.  If so, then we want to get the current speed/duplex
         */
        ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
        /* K1 must be handled first on the original PCH */
        if (hw->mac.type == e1000_pchlan) {
                ret_val = e1000_k1_gig_workaround_hv(hw, link);
        /* When connected at 10Mbps half-duplex, some parts are excessively
         * aggressive resulting in many collisions. To avoid this, increase
         * the IPG and reduce Rx latency in the PHY.
         */
        if (((hw->mac.type == e1000_pch2lan) ||
             (hw->mac.type == e1000_pch_lpt) ||
             (hw->mac.type == e1000_pch_spt)) && link) {
                reg = E1000_READ_REG(hw, E1000_STATUS);
                /* Neither FD nor a speed bit set => 10Mbps half-duplex */
                if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
                        reg = E1000_READ_REG(hw, E1000_TIPG);
                        reg &= ~E1000_TIPG_IPGT_MASK;
                        E1000_WRITE_REG(hw, E1000_TIPG, reg);
                        /* Reduce Rx latency in analog PHY */
                        ret_val = hw->phy.ops.acquire(hw);
                        if (hw->mac.type == e1000_pch2lan)
                                emi_addr = I82579_RX_CONFIG;
                                emi_addr = I217_RX_CONFIG;
                        ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
                        hw->phy.ops.release(hw);
                /* SPT at 1000/Full uses its own TIPG and Rx-config values */
                } else if (hw->mac.type == e1000_pch_spt &&
                           (reg & E1000_STATUS_FD) &&
                           (reg & E1000_STATUS_SPEED_MASK) == E1000_STATUS_SPEED_1000) {
                        reg &= ~E1000_TIPG_IPGT_MASK;
                        E1000_WRITE_REG(hw, E1000_TIPG, reg);
                        ret_val = hw->phy.ops.acquire(hw);
                        ret_val = e1000_write_emi_reg_locked(hw, I217_RX_CONFIG, 1);
                        hw->phy.ops.release(hw);
                /* SPT at gigabit: ensure the PHY FIFO pointer gap is at
                 * least 0x18 (PHY_REG(776, 20) bits 11:2).
                 */
                reg = E1000_READ_REG(hw, E1000_STATUS);
                if (hw->mac.type == e1000_pch_spt &&
                    (reg & E1000_STATUS_FD) &&
                    (reg & E1000_STATUS_SPEED_MASK) == E1000_STATUS_SPEED_1000) {
                        ret_val = hw->phy.ops.acquire(hw);
                        hw->phy.ops.read_reg_locked(hw, PHY_REG(776, 20), &data);
                        ptr_gap = (data & (0x3FF << 2)) >> 2;
                        if (ptr_gap < 0x18) {
                                data &= ~(0x3FF << 2);
                                data |= (0x18 << 2);
                                hw->phy.ops.write_reg_locked(hw,
                        hw->phy.ops.release(hw);
        /* I217 Packet Loss issue:
         * ensure that FEXTNVM4 Beacon Duration is set correctly
         * on power up.
         * Set the Beacon Duration for I217 to 8 usec
         */
        if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
                mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
                mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
                mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
                E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
        /* Work-around I218 hang issue */
        if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
        if (hw->mac.type == e1000_pch_lpt ||
            hw->mac.type == e1000_pch_spt) {
                /* Set platform power management values for
                 * Latency Tolerance Reporting (LTR)
                 * Optimized Buffer Flush/Fill (OBFF)
                 */
                ret_val = e1000_platform_pm_pch_lpt(hw, link);
        /* Clear link partner's EEE ability */
        hw->dev_spec.ich8lan.eee_lp_ability = 0;
        /* FEXTNVM6 K1-off workaround */
        if (hw->mac.type == e1000_pch_spt) {
                u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
                u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
                /* Mirror the analog-config K1-off capability into FEXTNVM6 */
                if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
                        fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
                        fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
                E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
                return E1000_SUCCESS; /* No link detected */
        mac->get_link_status = FALSE;
        /* Per-MAC-type link-up workarounds */
        switch (hw->mac.type) {
                ret_val = e1000_k1_workaround_lv(hw);
                if (hw->phy.type == e1000_phy_82578) {
                        ret_val = e1000_link_stall_workaround_hv(hw);
                /* Workaround for PCHx parts in half-duplex:
                 * Set the number of preambles removed from the packet
                 * when it is passed from the PHY to the MAC to prevent
                 * the MAC from misinterpreting the packet type.
                 */
                hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
                phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
                if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
                        phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
                hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
        /* Check if there was DownShift, must be checked
         * immediately after link-up
         */
        e1000_check_downshift_generic(hw);
        /* Enable/Disable EEE after link up */
        if (hw->phy.type > e1000_phy_82579) {
                ret_val = e1000_set_eee_pchlan(hw);
        /* If we are forcing speed/duplex, then we simply return since
         * we have already determined whether we have link or not.
         */
                return -E1000_ERR_CONFIG;
        /* Auto-Neg is enabled.  Auto Speed Detection takes care
         * of MAC speed/duplex configuration.  So we only need to
         * configure Collision Distance in the MAC.
         */
        mac->ops.config_collision_dist(hw);
        /* Configure Flow Control now that Auto-Neg has completed.
         * First, we need to restore the desired flow control
         * settings because we may have had to re-autoneg with a
         * different link partner.
         */
        ret_val = e1000_config_fc_after_link_up_generic(hw);
                DEBUGOUT("Error configuring flow control\n");
/**
 *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific function pointers for PHY, MAC, and NVM.
 *
 *  NOTE(review): this extract omits some case labels of the switch; the
 *  two init_params assignments below belong to different case arms.
 **/
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
        DEBUGFUNC("e1000_init_function_pointers_ich8lan");
        /* MAC and NVM init hooks are the same for all ICH8-family types */
        hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
        hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
        /* PHY init hook depends on the MAC generation */
        switch (hw->mac.type) {
        case e1000_ich10lan:
                hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
                hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
/**
 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Acquires the mutex for performing NVM operations.
 *
 *  NOTE(review): only the trace and the E1000_SUCCESS return are visible
 *  in this extract; any actual mutex-lock statement is not shown.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
        DEBUGFUNC("e1000_acquire_nvm_ich8lan");
        return E1000_SUCCESS;
/**
 *  e1000_release_nvm_ich8lan - Release NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Releases the mutex used while performing NVM operations.
 *
 *  NOTE(review): only the trace statement is visible in this extract; any
 *  actual mutex-unlock statement is not shown.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
        DEBUGFUNC("e1000_release_nvm_ich8lan");
/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select
 *  MAC CSR accesses.
 *
 *  Returns E1000_SUCCESS on acquisition, -E1000_ERR_CONFIG when the flag
 *  is already held or cannot be obtained within the timeout.
 *  NOTE(review): this extract omits the polling loops and delays between
 *  the visible statements.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
        u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
        s32 ret_val = E1000_SUCCESS;
        DEBUGFUNC("e1000_acquire_swflag_ich8lan");
        /* First wait for the SW flag to be free (clear) */
        extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
        if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
        DEBUGOUT("SW has already locked the resource.\n");
        ret_val = -E1000_ERR_CONFIG;
        /* Then attempt to set the flag and read it back */
        timeout = SW_FLAG_TIMEOUT;
        extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
        E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
        extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
        if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
        /* Acquisition failed: report FW/HW ownership and back out */
        DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
                  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
        E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
        ret_val = -E1000_ERR_CONFIG;
/**
 *  e1000_release_swflag_ich8lan - Release software control flag
 *  @hw: pointer to the HW structure
 *
 *  Releases the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
        DEBUGFUNC("e1000_release_swflag_ich8lan");
        extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
        /* Only clear the flag if we actually still hold it */
        if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
                E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
        /* Flag was already clear: someone else released it unexpectedly */
        DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
/**
 *  e1000_check_mng_mode_ich8lan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  This checks if the adapter has any manageability enabled.
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 *
 *  Returns TRUE when FW is valid and the FWSM mode field equals the
 *  IAMT management mode.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
        DEBUGFUNC("e1000_check_mng_mode_ich8lan");
        fwsm = E1000_READ_REG(hw, E1000_FWSM);
        return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
               ((fwsm & E1000_FWSM_MODE_MASK) ==
                (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
/**
 *  e1000_check_mng_mode_pchlan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  This checks if the adapter has iAMT enabled.
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 *
 *  Unlike the ICH8 variant, this tests only the IAMT mode bit rather
 *  than comparing the whole mode field.
 **/
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
        DEBUGFUNC("e1000_check_mng_mode_pchlan");
        fwsm = E1000_READ_REG(hw, E1000_FWSM);
        return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
               (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6-byte MAC address)
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 *
 *  NOTE(review): this extract omits interleaved lines; the unconditional
 *  RAL/RAH write + return below is presumably guarded by an 'index == 0'
 *  check in lines not visible here — confirm against the full source.
 **/
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
        u32 rar_low, rar_high;
        DEBUGFUNC("e1000_rar_set_pch2lan");
        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] |
                   ((u32) addr[1] << 8) |
                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
        /* If MAC address zero, no need to set the AV bit */
        if (rar_low || rar_high)
                rar_high |= E1000_RAH_AV;
        E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
        E1000_WRITE_FLUSH(hw);
        E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
        E1000_WRITE_FLUSH(hw);
        return E1000_SUCCESS;
        /* RAR[1-6] are owned by manageability.  Skip those and program the
         * next address into the SHRA register array.
         */
        if (index < (u32) (hw->mac.rar_entry_count)) {
                /* SHRA writes require the SW control flag */
                ret_val = e1000_acquire_swflag_ich8lan(hw);
                E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
                E1000_WRITE_FLUSH(hw);
                E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
                E1000_WRITE_FLUSH(hw);
                e1000_release_swflag_ich8lan(hw);
                /* verify the register updates */
                if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
                    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
                        return E1000_SUCCESS;
                /* Read-back mismatch: ME may have locked this SHRA entry */
                DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
        DEBUGOUT1("Failed to write receive address at index %d\n", index);
        return -E1000_ERR_CONFIG;
/**
 *  e1000_rar_set_pch_lpt - Set receive address registers
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6-byte MAC address)
 *  @index: receive address array register
 *
 *  Sets the receive address register array at index to the address passed
 *  in by addr.  For LPT, RAR[0] is the base address register that is to
 *  contain the MAC address.  SHRA[0-10] are the shared receive address
 *  registers that are shared between the Host and manageability engine (ME).
 *
 *  NOTE(review): this extract omits interleaved lines; the unconditional
 *  RAL/RAH write + return below is presumably guarded by an 'index == 0'
 *  check in lines not visible here — confirm against the full source.
 **/
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
        u32 rar_low, rar_high;
        DEBUGFUNC("e1000_rar_set_pch_lpt");
        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
        /* If MAC address zero, no need to set the AV bit */
        if (rar_low || rar_high)
                rar_high |= E1000_RAH_AV;
        E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
        E1000_WRITE_FLUSH(hw);
        E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
        E1000_WRITE_FLUSH(hw);
        return E1000_SUCCESS;
        /* The manageability engine (ME) can lock certain SHRAR registers that
         * it is using - those registers are unavailable for normal use.
         */
        if (index < hw->mac.rar_entry_count) {
                /* wlock_mac encodes how many SHRA entries ME has locked */
                wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
                            E1000_FWSM_WLOCK_MAC_MASK;
                wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
                /* Check if all SHRAR registers are locked */
                if ((wlock_mac == 0) || (index <= wlock_mac)) {
                        /* SHRA writes require the SW control flag */
                        ret_val = e1000_acquire_swflag_ich8lan(hw);
                        E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
                        E1000_WRITE_FLUSH(hw);
                        E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
                        E1000_WRITE_FLUSH(hw);
                        e1000_release_swflag_ich8lan(hw);
                        /* verify the register updates */
                        if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
                            (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
                                return E1000_SUCCESS;
        DEBUGOUT1("Failed to write receive address at index %d\n", index);
        return -E1000_ERR_CONFIG;
/**
 *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 *  After the generic MAC-side update, the MTA shadow is mirrored into
 *  the PHY's BM_MTA wakeup registers (low then high 16 bits per entry).
 *
 *  NOTE(review): the remainder of the parameter list and the masking
 *  constants on the truncated lines are not visible in this extract.
 **/
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
        DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
        /* Program the MAC's MTA first (also refreshes mta_shadow) */
        e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
        ret_val = hw->phy.ops.acquire(hw);
        ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
        /* Mirror each 32-bit MTA shadow entry into two 16-bit PHY regs */
        for (i = 0; i < hw->mac.mta_reg_count; i++) {
                hw->phy.ops.write_reg_page(hw, BM_MTA(i),
                                           (u16)(hw->mac.mta_shadow[i] &
                hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
                                           (u16)((hw->mac.mta_shadow[i] >> 16) &
        e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
        hw->phy.ops.release(hw);
/**
 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 *  @hw: pointer to the HW structure
 *
 *  Checks if firmware is blocking the reset of the PHY.
 *  This is a function pointer entry point only called by
 *  reset routines.
 *
 *  Retries up to 10 times (loop body elided in this extract) and returns
 *  E1000_BLK_PHY_RESET while FW still holds the PHY, else E1000_SUCCESS.
 **/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
        bool blocked = FALSE;
        DEBUGFUNC("e1000_check_reset_block_ich8lan");
        fwsm = E1000_READ_REG(hw, E1000_FWSM);
        /* RSPCIPHY clear => FW has not released the PHY to software */
        if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
        } while (blocked && (i++ < 10));
        return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
/**
 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 *  @hw: pointer to the HW structure
 *
 *  Assumes semaphore already acquired.
 *
 *  Copies the SMBus address from the STRAP register into HV_SMB_ADDR,
 *  enabling PEC and marking the address valid; for the I217 PHY it also
 *  restores the strapped SMBus frequency bits.
 **/
static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
        u32 strap = E1000_READ_REG(hw, E1000_STRAP);
        u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
                   E1000_STRAP_SMT_FREQ_SHIFT;
        strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
        /* Replace the address field, keep other bits, mark valid + PEC */
        phy_data &= ~HV_SMB_ADDR_MASK;
        phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
        phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
        if (hw->phy.type == e1000_phy_i217) {
                /* Restore SMBus frequency */
                phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
                /* Split the 2-bit strapped freq across the two HV bits */
                phy_data |= (freq & (1 << 0)) <<
                            HV_SMB_ADDR_FREQ_LOW_SHIFT;
                phy_data |= (freq & (1 << 1)) <<
                            (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
                DEBUGOUT("Unsupported SMB frequency in PHY\n");
        return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
/**
 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 *  @hw: pointer to the HW structure
 *
 *  SW should configure the LCD from the NVM extended configuration region
 *  as a workaround for certain parts.
 *
 *  NOTE(review): this extract omits interleaved lines (braces, case labels,
 *  error checks, the 'rxa'-style locals on truncated calls); only the
 *  visible statements are annotated.
 **/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
        struct e1000_phy_info *phy = &hw->phy;
        u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
        s32 ret_val = E1000_SUCCESS;
        u16 word_addr, reg_data, reg_addr, phy_page = 0;
        DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
        /* Initialize the PHY from the NVM on ICH platforms.  This
         * is needed due to an issue where the NVM configuration is
         * not properly autoloaded after power transitions.
         * Therefore, after each PHY reset, we will load the
         * configuration data out of the NVM manually.
         */
        switch (hw->mac.type) {
                if (phy->type != e1000_phy_igp_3)
                /* AMT/C ICH8 variants use the full SW_CONFIG mask */
                if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
                    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
                        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
                sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
        ret_val = hw->phy.ops.acquire(hw);
        /* Bail out unless the NVM requests SW configuration */
        data = E1000_READ_REG(hw, E1000_FEXTNVM);
        if (!(data & sw_cfg_mask))
        /* Make sure HW does not configure LCD from PHY
         * extended configuration before SW configuration
         */
        data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
        if ((hw->mac.type < e1000_pch2lan) &&
            (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
        /* Extract the extended-config region size and base pointer */
        cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
        cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
        cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
        cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
        cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
        if (((hw->mac.type == e1000_pchlan) &&
             !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
            (hw->mac.type > e1000_pchlan)) {
                /* HW configures the SMBus address and LEDs when the
                 * OEM and LCD Write Enable bits are set in the NVM.
                 * When both NVM bits are cleared, SW will configure
                 * them instead.
                 */
                ret_val = e1000_write_smbus_addr(hw);
                /* Mirror the MAC LED config into the PHY */
                data = E1000_READ_REG(hw, E1000_LEDCTL);
                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
        /* Configure LCD from extended configuration region. */
        /* cnf_base_addr is in DWORD */
        word_addr = (u16)(cnf_base_addr << 1);
        /* Each entry is a (data, address) word pair in the NVM */
        for (i = 0; i < cnf_size; i++) {
                ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
                ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
                /* Save off the PHY page for future writes. */
                if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
                        phy_page = reg_data;
                /* Combine the saved page with the register offset */
                reg_addr &= PHY_REG_MASK;
                reg_addr |= phy_page;
                ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
        hw->phy.ops.release(hw);
2293 * e1000_k1_gig_workaround_hv - K1 Si workaround
2294 * @hw: pointer to the HW structure
2295 * @link: link up bool flag
2297 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2298 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
2299 * If link is down, the function will restore the default K1 setting located
2302 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2304 	s32 ret_val = E1000_SUCCESS;
/* Default K1 state comes from the NVM; it is only overridden while at 1G. */
2306 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2308 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
/* Workaround applies to PCH (82577/82578) only; no-op elsewhere. */
2310 	if (hw->mac.type != e1000_pchlan)
2311 		return E1000_SUCCESS;
2313 	/* Wrap the whole flow with the sw flag */
2314 	ret_val = hw->phy.ops.acquire(hw);
2318 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2320 		if (hw->phy.type == e1000_phy_82578) {
/* 82578: resolved 1G link is detected via BM_CS_STATUS bits. */
2321 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2326 			status_reg &= (BM_CS_STATUS_LINK_UP |
2327 				       BM_CS_STATUS_RESOLVED |
2328 				       BM_CS_STATUS_SPEED_MASK);
2330 			if (status_reg == (BM_CS_STATUS_LINK_UP |
2331 					   BM_CS_STATUS_RESOLVED |
2332 					   BM_CS_STATUS_SPEED_1000))
2336 		if (hw->phy.type == e1000_phy_82577) {
/* 82577: same check through HV_M_STATUS instead. */
2337 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2342 			status_reg &= (HV_M_STATUS_LINK_UP |
2343 				       HV_M_STATUS_AUTONEG_COMPLETE |
2344 				       HV_M_STATUS_SPEED_MASK);
2346 			if (status_reg == (HV_M_STATUS_LINK_UP |
2347 					   HV_M_STATUS_AUTONEG_COMPLETE |
2348 					   HV_M_STATUS_SPEED_1000))
2352 		/* Link stall fix for link up */
2353 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2359 		/* Link stall fix for link down */
2360 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
/* Apply the (possibly overridden) K1 setting; sem is still held here. */
2366 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2369 	hw->phy.ops.release(hw);
2375 * e1000_configure_k1_ich8lan - Configure K1 power state
2376 * @hw: pointer to the HW structure
2377 * @enable: K1 state to configure
2379 * Configure the K1 power state based on the provided parameter.
2380 * Assumes semaphore already acquired.
2382 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2384 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2392 	DEBUGFUNC("e1000_configure_k1_ich8lan");
/* Set or clear the K1 enable bit in the KMRN K1 config register. */
2394 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2400 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2402 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2404 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Toggle forced-speed + speed-bypass briefly so the K1 change latches,
 * then restore the original CTRL/CTRL_EXT values.
 */
2410 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2411 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2413 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2414 	reg |= E1000_CTRL_FRCSPD;
2415 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2417 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2418 	E1000_WRITE_FLUSH(hw);
2420 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2421 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2422 	E1000_WRITE_FLUSH(hw);
2425 	return E1000_SUCCESS;
2429 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2430 * @hw: pointer to the HW structure
2431 * @d0_state: boolean if entering d0 or d3 device state
2433 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2434 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2435 * in NVM determines whether HW should configure LPLU and Gbe Disable.
2437 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2443 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
/* SW OEM-bit configuration is only needed on PCH and later. */
2445 	if (hw->mac.type < e1000_pchlan)
2448 	ret_val = hw->phy.ops.acquire(hw);
2452 	if (hw->mac.type == e1000_pchlan) {
/* On PCH, skip if HW itself is allowed to configure (OEM write enable
 * set) or SW config is not indicated in FEXTNVM.
 */
2453 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2454 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2458 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2459 		if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
/* Mirror the MAC's PHY_CTRL Gbe-disable/LPLU state into the PHY's
 * HV_OEM_BITS register, distinguishing the D0 vs non-D0 bits.
 */
2462 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2464 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2468 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2471 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2472 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2474 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2475 			oem_reg |= HV_OEM_BITS_LPLU;
2477 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2478 			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2479 			oem_reg |= HV_OEM_BITS_GBE_DIS;
2481 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2482 			       E1000_PHY_CTRL_NOND0A_LPLU))
2483 			oem_reg |= HV_OEM_BITS_LPLU;
2486 	/* Set Restart auto-neg to activate the bits */
2487 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2488 	    !hw->phy.ops.check_reset_block(hw))
2489 		oem_reg |= HV_OEM_BITS_RESTART_AN;
2491 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2494 	hw->phy.ops.release(hw);
2501 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2502 * @hw: pointer to the HW structure
2504 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2509 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
/* Read-modify-write KMRN mode control to set the slow-MDIO bit. */
2511 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2515 	data |= HV_KMRN_MDIO_SLOW;
2517 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2523 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2524 * done after every PHY reset.
2526 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2528 	s32 ret_val = E1000_SUCCESS;
2531 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
/* These post-reset PHY workarounds are PCH (82577/82578) specific. */
2533 	if (hw->mac.type != e1000_pchlan)
2534 		return E1000_SUCCESS;
2536 	/* Set MDIO slow mode before any other MDIO access */
2537 	if (hw->phy.type == e1000_phy_82577) {
2538 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
/* Early-silicon-only tuning: 82577 rev 1/2 and 82578 rev 1. */
2543 	if (((hw->phy.type == e1000_phy_82577) &&
2544 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2545 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2546 		/* Disable generation of early preamble */
2547 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2551 		/* Preamble tuning for SSC */
2552 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2558 	if (hw->phy.type == e1000_phy_82578) {
2559 		/* Return registers to default by doing a soft reset then
2560 		 * writing 0x3140 to the control register.
2562 		if (hw->phy.revision < 2) {
2563 			e1000_phy_sw_reset_generic(hw);
2564 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
/* Select page 0 via raw MDIC under the semaphore before the K1 flow. */
2570 	ret_val = hw->phy.ops.acquire(hw);
2575 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2576 	hw->phy.ops.release(hw);
2580 	/* Configure the K1 Si workaround during phy reset assuming there is
2581 	 * link so that it disables K1 if link is in 1Gbps.
2583 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2587 	/* Workaround for link disconnects on a busy hub in half duplex */
2588 	ret_val = hw->phy.ops.acquire(hw);
2591 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2594 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2599 	/* set MSE higher to enable link to stay up when noise is high */
2600 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2602 	hw->phy.ops.release(hw);
2608 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2609 * @hw: pointer to the HW structure
2611 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2617 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2619 	ret_val = hw->phy.ops.acquire(hw);
/* BM wakeup-register page must be opened before writing BM_RAR_*. */
2622 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2626 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2627 	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
/* Low 32 bits of the receive address split across two 16-bit regs. */
2628 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2629 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2630 					   (u16)(mac_reg & 0xFFFF));
2631 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2632 					   (u16)((mac_reg >> 16) & 0xFFFF));
/* High word plus the address-valid flag via BM_RAR_CTRL. */
2634 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2635 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2636 					   (u16)(mac_reg & 0xFFFF));
2637 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2638 					   (u16)((mac_reg & E1000_RAH_AV)
2642 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2645 	hw->phy.ops.release(hw);
/* Compute the 802.3 (reflected, poly 0xEDB88320) CRC-32 of a 6-byte
 * destination MAC address; used as the initial CRC seed for the PCH2
 * jumbo-frame workaround (written to PCH_RAICC).
 */
2648 static u32 e1000_calc_rx_da_crc(u8 mac[])
2650 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2651 	u32 i, j, mask, crc;
2653 	DEBUGFUNC("e1000_calc_rx_da_crc");
2656 	for (i = 0; i < 6; i++) {
2658 		for (j = 8; j > 0; j--) {
/* mask is all-ones when the LSB is set, zero otherwise; this is the
 * branchless form of "if (crc & 1) crc = (crc >> 1) ^ poly".
 */
2659 			mask = (crc & 1) * (-1);
2660 			crc = (crc >> 1) ^ (poly & mask);
2667 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2669 * @hw: pointer to the HW structure
2670 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2672 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2674 	s32 ret_val = E1000_SUCCESS;
2679 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
/* Workaround applies to 82579 (PCH2) and later only. */
2681 	if (hw->mac.type < e1000_pch2lan)
2682 		return E1000_SUCCESS;
2684 	/* disable Rx path while enabling/disabling workaround */
2685 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2686 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2687 					phy_reg | (1 << 14));
2692 		/* Write Rx addresses (rar_entry_count for RAL/H, and
2693 		 * SHRAL/H) and initial CRC values to the MAC
2695 		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2696 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2697 			u32 addr_high, addr_low;
/* Skip RAR slots that are not marked address-valid. */
2699 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2700 			if (!(addr_high & E1000_RAH_AV))
2702 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
/* Unpack the 48-bit MAC address, little-endian byte order. */
2703 			mac_addr[0] = (addr_low & 0xFF);
2704 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2705 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2706 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2707 			mac_addr[4] = (addr_high & 0xFF);
2708 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
/* Seed the per-RAR initial CRC so HW can continue the calculation
 * with SECRC (CRC stripping) active.
 */
2710 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2711 					e1000_calc_rx_da_crc(mac_addr));
2714 		/* Write Rx addresses to the PHY */
2715 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2717 		/* Enable jumbo frame workaround in the MAC */
2718 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2719 		mac_reg &= ~(1 << 14);
2720 		mac_reg |= (7 << 15);
2721 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip the Ethernet CRC on receive while the workaround is active. */
2723 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2724 		mac_reg |= E1000_RCTL_SECRC;
2725 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2727 		ret_val = e1000_read_kmrn_reg_generic(hw,
2728 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2732 		ret_val = e1000_write_kmrn_reg_generic(hw,
2733 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2737 		ret_val = e1000_read_kmrn_reg_generic(hw,
2738 						E1000_KMRNCTRLSTA_HD_CTRL,
2742 		data &= ~(0xF << 8);
2744 		ret_val = e1000_write_kmrn_reg_generic(hw,
2745 						E1000_KMRNCTRLSTA_HD_CTRL,
2750 		/* Enable jumbo frame workaround in the PHY */
2751 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2752 		data &= ~(0x7F << 5);
2753 		data |= (0x37 << 5);
2754 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2757 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2759 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
/* Adjust the PHY Tx pointer gap for jumbo frames. */
2762 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2763 		data &= ~(0x3FF << 2);
2764 		data |= (E1000_TX_PTR_GAP << 2);
2765 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2768 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2771 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2772 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2777 		/* Write MAC register values back to h/w defaults */
2778 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2779 		mac_reg &= ~(0xF << 14);
2780 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2782 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2783 		mac_reg &= ~E1000_RCTL_SECRC;
2784 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2786 		ret_val = e1000_read_kmrn_reg_generic(hw,
2787 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2791 		ret_val = e1000_write_kmrn_reg_generic(hw,
2792 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2796 		ret_val = e1000_read_kmrn_reg_generic(hw,
2797 						E1000_KMRNCTRLSTA_HD_CTRL,
2801 		data &= ~(0xF << 8);
2803 		ret_val = e1000_write_kmrn_reg_generic(hw,
2804 						E1000_KMRNCTRLSTA_HD_CTRL,
2809 		/* Write PHY register values back to h/w defaults */
2810 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2811 		data &= ~(0x7F << 5);
2812 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2815 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2817 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2820 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2821 		data &= ~(0x3FF << 2);
2823 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2826 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2829 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2830 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2836 	/* re-enable Rx path after enabling/disabling workaround */
2837 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2842 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2843 * done after every PHY reset.
2845 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2847 	s32 ret_val = E1000_SUCCESS;
2849 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
/* Post-reset PHY workarounds for 82579 (PCH2) only. */
2851 	if (hw->mac.type != e1000_pch2lan)
2852 		return E1000_SUCCESS;
2854 	/* Set MDIO slow mode before any other MDIO access */
2855 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2859 	ret_val = hw->phy.ops.acquire(hw);
2862 	/* set MSE higher to enable link to stay up when noise is high */
2863 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2866 	/* drop link after 5 times MSE threshold was reached */
2867 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2869 	hw->phy.ops.release(hw);
2875 * e1000_k1_gig_workaround_lv - K1 Si workaround
2876 * @hw: pointer to the HW structure
2878 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2879 * Disable K1 for 1000 and 100 speeds
2881 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2883 	s32 ret_val = E1000_SUCCESS;
2886 	DEBUGFUNC("e1000_k1_workaround_lv");
/* 82579 (PCH2) only. */
2888 	if (hw->mac.type != e1000_pch2lan)
2889 		return E1000_SUCCESS;
2891 	/* Set K1 beacon duration based on 10Mbs speed */
2892 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Only act once link is up and autoneg has completed. */
2896 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2897 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2899 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2902 			/* LV 1G/100 Packet drop issue wa  */
/* At 1G/100: disable K1 entirely to avoid the packet-drop issue. */
2903 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2907 			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2908 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
/* Otherwise (10Mbps): lengthen the K1 beacon duration to 16us. */
2914 			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2915 			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2916 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2917 			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2925 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2926 * @hw: pointer to the HW structure
2927 * @gate: boolean set to TRUE to gate, FALSE to ungate
2929 * Gate/ungate the automatic PHY configuration via hardware; perform
2930 * the configuration via software instead.
2932 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2936 	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
/* Gating is only supported on 82579 (PCH2) and later. */
2938 	if (hw->mac.type < e1000_pch2lan)
/* Read-modify-write the gate bit in EXTCNF_CTRL per the flag. */
2941 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2944 		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2946 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2948 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2952 * e1000_lan_init_done_ich8lan - Check for PHY config completion
2953 * @hw: pointer to the HW structure
2955 * Check the appropriate indication the MAC has finished configuring the
2956 * PHY after a software reset.
2958 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2960 	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2962 	DEBUGFUNC("e1000_lan_init_done_ich8lan");
2964 	/* Wait for basic configuration completes before proceeding */
/* Poll STATUS.LAN_INIT_DONE until set or the loop counter expires. */
2966 		data = E1000_READ_REG(hw, E1000_STATUS);
2967 		data &= E1000_STATUS_LAN_INIT_DONE;
2969 	} while ((!data) && --loop);
2971 	/* If basic configuration is incomplete before the above loop
2972 	 * count reaches 0, loading the configuration from NVM will
2973 	 * leave the PHY in a bad state possibly resulting in no link.
2976 		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2978 	/* Clear the Init Done bit for the next init event */
2979 	data = E1000_READ_REG(hw, E1000_STATUS);
2980 	data &= ~E1000_STATUS_LAN_INIT_DONE;
2981 	E1000_WRITE_REG(hw, E1000_STATUS, data);
2985 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2986 * @hw: pointer to the HW structure
2988 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2990 	s32 ret_val = E1000_SUCCESS;
2993 	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
/* Nothing to do if PHY access is blocked (e.g. by management FW). */
2995 	if (hw->phy.ops.check_reset_block(hw))
2996 		return E1000_SUCCESS;
2998 	/* Allow time for h/w to get to quiescent state after reset */
3001 	/* Perform any necessary post-reset workarounds */
3002 	switch (hw->mac.type) {
3004 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3009 		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3017 	/* Clear the host wakeup bit after lcd reset */
3018 	if (hw->mac.type >= e1000_pchlan) {
3019 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3020 		reg &= ~BM_WUC_HOST_WU_BIT;
3021 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3024 	/* Configure the LCD with the extended configuration region in NVM */
3025 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3029 	/* Configure the LCD with the OEM bits in NVM */
3030 	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3032 	if (hw->mac.type == e1000_pch2lan) {
3033 		/* Ungate automatic PHY configuration on non-managed 82579 */
3034 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3035 		    E1000_ICH_FWSM_FW_VALID)) {
3037 			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3040 		/* Set EEE LPI Update Timer to 200usec */
3041 		ret_val = hw->phy.ops.acquire(hw);
3044 		ret_val = e1000_write_emi_reg_locked(hw,
3045 						     I82579_LPI_UPDATE_TIMER,
3047 		hw->phy.ops.release(hw);
3054 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3055 * @hw: pointer to the HW structure
3058 * This is a function pointer entry point called by drivers
3059 * or other shared routines.
3061 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3063 	s32 ret_val = E1000_SUCCESS;
3065 	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3067 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3068 	if ((hw->mac.type == e1000_pch2lan) &&
3069 	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3070 		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
/* Generic PHY hardware reset, then the ICH/PCH-specific post-reset
 * steps (workarounds, LCD/OEM configuration).
 */
3072 	ret_val = e1000_phy_hw_reset_generic(hw);
3076 	return e1000_post_phy_reset_ich8lan(hw);
3080 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3081 * @hw: pointer to the HW structure
3082 * @active: TRUE to enable LPLU, FALSE to disable
3084 * Sets the LPLU state according to the active flag. For PCH, if OEM write
3085 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3086 * the phy speed. This function will manually set the LPLU bit and restart
3087 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
3088 * since it configures the same bit.
3090 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3095 	DEBUGFUNC("e1000_set_lplu_state_pchlan");
/* Manually toggle the LPLU bit in HV_OEM_BITS (same bit for D0/D3). */
3097 	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3102 		oem_reg |= HV_OEM_BITS_LPLU;
3104 		oem_reg &= ~HV_OEM_BITS_LPLU;
/* Restart auto-neg to apply the new setting, unless resets are blocked. */
3106 	if (!hw->phy.ops.check_reset_block(hw))
3107 		oem_reg |= HV_OEM_BITS_RESTART_AN;
3109 	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3113 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3114 * @hw: pointer to the HW structure
3115 * @active: TRUE to enable LPLU, FALSE to disable
3117 * Sets the LPLU D0 state according to the active flag. When
3118 * activating LPLU this function also disables smart speed
3119 * and vice versa. LPLU will not be activated unless the
3120 * device autonegotiation advertisement meets standards of
3121 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3122 * This is a function pointer entry point only called by
3123 * PHY setup routines.
3125 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3127 	struct e1000_phy_info *phy = &hw->phy;
3129 	s32 ret_val = E1000_SUCCESS;
3132 	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
/* The IFE PHY has no D0 LPLU support here; nothing to do. */
3134 	if (phy->type == e1000_phy_ife)
3135 		return E1000_SUCCESS;
3137 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3140 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3141 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
/* The SmartSpeed adjustments below only apply to the IGP3 PHY. */
3143 		if (phy->type != e1000_phy_igp_3)
3144 			return E1000_SUCCESS;
3146 		/* Call gig speed drop workaround on LPLU before accessing
3149 		if (hw->mac.type == e1000_ich8lan)
3150 			e1000_gig_downshift_workaround_ich8lan(hw);
3152 		/* When LPLU is enabled, we should disable SmartSpeed */
3153 		ret_val = phy->ops.read_reg(hw,
3154 					    IGP01E1000_PHY_PORT_CONFIG,
3158 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3159 		ret_val = phy->ops.write_reg(hw,
3160 					     IGP01E1000_PHY_PORT_CONFIG,
3165 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3166 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3168 		if (phy->type != e1000_phy_igp_3)
3169 			return E1000_SUCCESS;
3171 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3172 		 * during Dx states where the power conservation is most
3173 		 * important.  During driver activity we should enable
3174 		 * SmartSpeed, so performance is maintained.
3176 		if (phy->smart_speed == e1000_smart_speed_on) {
3177 			ret_val = phy->ops.read_reg(hw,
3178 						    IGP01E1000_PHY_PORT_CONFIG,
3183 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3184 			ret_val = phy->ops.write_reg(hw,
3185 						     IGP01E1000_PHY_PORT_CONFIG,
3189 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3190 			ret_val = phy->ops.read_reg(hw,
3191 						    IGP01E1000_PHY_PORT_CONFIG,
3196 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3197 			ret_val = phy->ops.write_reg(hw,
3198 						     IGP01E1000_PHY_PORT_CONFIG,
3205 	return E1000_SUCCESS;
3209 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3210 * @hw: pointer to the HW structure
3211 * @active: TRUE to enable LPLU, FALSE to disable
3213 * Sets the LPLU D3 state according to the active flag. When
3214 * activating LPLU this function also disables smart speed
3215 * and vice versa. LPLU will not be activated unless the
3216 * device autonegotiation advertisement meets standards of
3217 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3218 * This is a function pointer entry point only called by
3219 * PHY setup routines.
3221 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3223 	struct e1000_phy_info *phy = &hw->phy;
3225 	s32 ret_val = E1000_SUCCESS;
3228 	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3230 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Deactivating: clear non-D0a LPLU, then re-enable SmartSpeed per the
 * configured policy (IGP3 PHY only).
 */
3233 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3234 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3236 		if (phy->type != e1000_phy_igp_3)
3237 			return E1000_SUCCESS;
3239 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3240 		 * during Dx states where the power conservation is most
3241 		 * important.  During driver activity we should enable
3242 		 * SmartSpeed, so performance is maintained.
3244 		if (phy->smart_speed == e1000_smart_speed_on) {
3245 			ret_val = phy->ops.read_reg(hw,
3246 						    IGP01E1000_PHY_PORT_CONFIG,
3251 			data |= IGP01E1000_PSCFR_SMART_SPEED;
3252 			ret_val = phy->ops.write_reg(hw,
3253 						     IGP01E1000_PHY_PORT_CONFIG,
3257 		} else if (phy->smart_speed == e1000_smart_speed_off) {
3258 			ret_val = phy->ops.read_reg(hw,
3259 						    IGP01E1000_PHY_PORT_CONFIG,
3264 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3265 			ret_val = phy->ops.write_reg(hw,
3266 						     IGP01E1000_PHY_PORT_CONFIG,
/* Activating: only if the advertisement includes 10Mbps modes that
 * LPLU can fall back to.
 */
3271 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3272 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3273 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3274 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3275 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3277 		if (phy->type != e1000_phy_igp_3)
3278 			return E1000_SUCCESS;
3280 		/* Call gig speed drop workaround on LPLU before accessing
3283 		if (hw->mac.type == e1000_ich8lan)
3284 			e1000_gig_downshift_workaround_ich8lan(hw);
3286 		/* When LPLU is enabled, we should disable SmartSpeed */
3287 		ret_val = phy->ops.read_reg(hw,
3288 					    IGP01E1000_PHY_PORT_CONFIG,
3293 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3294 		ret_val = phy->ops.write_reg(hw,
3295 					     IGP01E1000_PHY_PORT_CONFIG,
3303 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3304 * @hw: pointer to the HW structure
3305 * @bank: pointer to the variable that returns the active bank
3307 * Reads signature byte from the NVM using the flash access registers.
3308 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3310 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3313 	struct e1000_nvm_info *nvm = &hw->nvm;
/* Bank 1 starts one flash-bank worth of bytes after bank 0. */
3314 	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
/* Offset (in bytes) of the signature byte within a bank. */
3315 	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3319 	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3321 	switch (hw->mac.type) {
/* NOTE(review): elided case labels here; this arm appears to read the
 * bank from CTRL_EXT.NVMVS -- confirm against the full source.
 */
3323 		*bank = E1000_READ_REG(hw, E1000_CTRL_EXT) & E1000_CTRL_EXT_NVMVS;
3324 		if (*bank == 0 || *bank == 1) {
3325 			return -E1000_ERR_NVM;
/* Try the EECD SEC1VAL indication first; fall back to reading the
 * flash signature bytes if it is not valid.
 */
3333 		eecd = E1000_READ_REG(hw, E1000_EECD);
3334 		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3335 		    E1000_EECD_SEC1VAL_VALID_MASK) {
3336 			if (eecd & E1000_EECD_SEC1VAL)
3341 			return E1000_SUCCESS;
3343 		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3346 		/* set bank to 0 in case flash read fails */
/* Check bank 0's signature byte (bits 15:14 of word 0x13 == 10b). */
3350 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3354 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3355 		    E1000_ICH_NVM_SIG_VALUE) {
3357 			return E1000_SUCCESS;
/* Bank 0 invalid: check bank 1 at the same offset within that bank. */
3361 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3366 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3367 		    E1000_ICH_NVM_SIG_VALUE) {
3369 			return E1000_SUCCESS;
3372 		DEBUGOUT("ERROR: No valid NVM bank present\n");
3373 		return -E1000_ERR_NVM;
3378 * e1000_read_nvm_spt - Read word(s) from the NVM
3379 * @hw: pointer to the HW structure
3380 * @offset: The offset (in bytes) of the word(s) to read.
3381 * @words: Size of data to read in words
3382 * @data: Pointer to the word(s) to read at offset.
3384 * Reads a word(s) from the NVM using the flash access registers.
3386 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3389 	struct e1000_nvm_info *nvm = &hw->nvm;
3390 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3392 	s32 ret_val = E1000_SUCCESS;
3398 	DEBUGFUNC("e1000_read_nvm_spt");
/* Validate that [offset, offset+words) lies inside the NVM word space. */
3400 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3402 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3403 		ret_val = -E1000_ERR_NVM;
3407 	nvm->ops.acquire(hw);
3409 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3410 	if (ret_val != E1000_SUCCESS) {
3411 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Translate the logical offset into the active bank's flash offset. */
3415 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3416 	act_offset += offset;
3418 	ret_val = E1000_SUCCESS;
/* SPT flash is read a dword (two words) at a time; handle an odd
 * trailing word and words shadowed (modified) in the shadow RAM.
 */
3419 	for (i = 0; i < words; i += 2) {
3420 		if (words - i == 1) {
3421 			if (dev_spec->shadow_ram[offset+i].modified) {
3422 				data[i] = dev_spec->shadow_ram[offset+i].value;
/* Align the dword read and pick the half that holds our word. */
3424 				use_offset = act_offset + i -
3425 					     (act_offset + i) % 2;
3426 				ret_val = e1000_read_flash_dword_ich8lan(
3432 				if ((act_offset + i) % 2 == 0)
3433 					data[i] = (u16)(dword & 0xFFFF);
3435 					data[i] = (u16)((dword >> 16) & 0xFFFF);
3438 			use_offset = act_offset + i;
/* Only hit flash when at least one of the two words is unmodified. */
3439 			if (!(dev_spec->shadow_ram[offset + i].modified) ||
3440 			    !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3442 				e1000_read_flash_dword_ich8lan(hw,
3443 							 use_offset, &dword);
/* Prefer shadow-RAM values for modified words; low half is word i,
 * high half is word i+1.
 */
3447 			if (dev_spec->shadow_ram[offset + i].modified)
3448 				data[i] = dev_spec->shadow_ram[offset + i].value;
3450 				data[i] = (u16)(dword & 0xFFFF);
3451 			if (dev_spec->shadow_ram[offset + i].modified)
3453 				    dev_spec->shadow_ram[offset + i + 1].value;
3455 				data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3459 	nvm->ops.release(hw);
3463 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3469 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
3470 * @hw: pointer to the HW structure
3471 * @offset: The offset (in bytes) of the word(s) to read.
3472 * @words: Size of data to read in words
3473 * @data: Pointer to the word(s) to read at offset.
3475 * Reads a word(s) from the NVM using the flash access registers.
3477 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3480 	struct e1000_nvm_info *nvm = &hw->nvm;
3481 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3483 	s32 ret_val = E1000_SUCCESS;
3487 	DEBUGFUNC("e1000_read_nvm_ich8lan");
/* Validate that [offset, offset+words) lies inside the NVM word space. */
3489 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3491 		DEBUGOUT("nvm parameter(s) out of bounds\n");
3492 		ret_val = -E1000_ERR_NVM;
3496 	nvm->ops.acquire(hw);
3498 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3499 	if (ret_val != E1000_SUCCESS) {
3500 		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Translate the logical offset into the active bank's flash offset. */
3504 	act_offset = (bank) ? nvm->flash_bank_size : 0;
3505 	act_offset += offset;
3507 	ret_val = E1000_SUCCESS;
/* Shadow-RAM entries marked modified take precedence over flash. */
3508 	for (i = 0; i < words; i++) {
3509 		if (dev_spec->shadow_ram[offset+i].modified) {
3510 			data[i] = dev_spec->shadow_ram[offset+i].value;
3512 			ret_val = e1000_read_flash_word_ich8lan(hw,
3521 	nvm->ops.release(hw);
3525 		DEBUGOUT1("NVM read error: %d\n", ret_val);
3531 * e1000_flash_cycle_init_ich8lan - Initialize flash
3532 * @hw: pointer to the HW structure
3534 * This function does initial flash setup so that a new read/write/erase cycle
3537 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3539 	union ich8_hws_flash_status hsfsts;
3540 	s32 ret_val = -E1000_ERR_NVM;
3542 	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3544 	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3546 	/* Check if the flash descriptor is valid */
3547 	if (!hsfsts.hsf_status.fldesvalid) {
3548 		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3549 		return -E1000_ERR_NVM;
3552 	/* Clear FCERR and DAEL in hw status by writing 1 */
3553 	hsfsts.hsf_status.flcerr = 1;
3554 	hsfsts.hsf_status.dael = 1;
/* SPT exposes HSFSTS as the low 16 bits of a 32-bit register; older
 * parts use a true 16-bit register access.
 */
3555 	if (hw->mac.type == e1000_pch_spt)
3556 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3558 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3560 	/* Either we should have a hardware SPI cycle in progress
3561 	 * bit to check against, in order to start a new cycle or
3562 	 * FDONE bit should be changed in the hardware so that it
3563 	 * is 1 after hardware reset, which can then be used as an
3564 	 * indication whether a cycle is in progress or has been
3568 	if (!hsfsts.hsf_status.flcinprog) {
3569 		/* There is no cycle running at present,
3570 		 * so we can start a cycle.
3571 		 * Begin by setting Flash Cycle Done.
3573 		hsfsts.hsf_status.flcdone = 1;
3574 		if (hw->mac.type == e1000_pch_spt)
3575 			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3577 			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3578 		ret_val = E1000_SUCCESS;
3582 		/* Otherwise poll for sometime so the current
3583 		 * cycle has a chance to end before giving up.
3585 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3586 			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3588 			if (!hsfsts.hsf_status.flcinprog) {
3589 				ret_val = E1000_SUCCESS;
3594 		if (ret_val == E1000_SUCCESS) {
3595 			/* Successful in waiting for previous cycle to timeout,
3596 			 * now set the Flash Cycle Done.
3598 			hsfsts.hsf_status.flcdone = 1;
3599 			if (hw->mac.type == e1000_pch_spt)
3600 				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3601 						      hsfsts.regval & 0xFFFF);
3603 				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3606 			DEBUGOUT("Flash controller busy, cannot get access\n");
3614 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3615 * @hw: pointer to the HW structure
3616 * @timeout: maximum time to wait for completion
3618 * This function starts a flash cycle and waits for its completion.
3620 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3622 	union ich8_hws_flash_ctrl hsflctl;
3623 	union ich8_hws_flash_status hsfsts;
3626 	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3628 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
/* On SPT, HSFCTL lives in the upper 16 bits of the 32-bit HSFSTS
 * register; elsewhere it is its own 16-bit register.
 */
3629 	if (hw->mac.type == e1000_pch_spt)
3630 		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
3632 		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3633 	hsflctl.hsf_ctrl.flcgo = 1;
3635 	if (hw->mac.type == e1000_pch_spt)
3636 		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3638 		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3640 	/* wait till FDONE bit is set to 1 */
3642 		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3643 		if (hsfsts.hsf_status.flcdone)
3646 	} while (i++ < timeout);
/* Success only when the cycle completed without a flash cycle error. */
3648 	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3649 		return E1000_SUCCESS;
3651 	return -E1000_ERR_NVM;
3655 * e1000_read_flash_word_ich8lan - Read word from flash
3656 * @hw: pointer to the HW structure
3657 * @offset: offset to data location
3658 * @data: pointer to the location for storing the data
3660 * Reads the flash word at offset into data. Offset is converted
3661 * to bytes before read.
3663 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3666 DEBUGFUNC("e1000_read_flash_word_ich8lan");
/* NOTE(review): the validating condition for this early return is not
 * visible in this extract -- presumably a NULL-data/offset-range check;
 * confirm against the full source.
 */
3669 return -E1000_ERR_NVM;
3671 /* Must convert offset into bytes. */
/* size = 2 -> read one full 16-bit word. */
3674 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3678 * e1000_read_flash_dword_ich8lan - Read dword from flash
3679 * @hw: pointer to the HW structure
3680 * @offset: offset to data location
3681 * @data: pointer to the location for storing the data
3683 * Reads the flash dword at offset into data. Offset is converted
3684 * to bytes before read.
3686 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3689 DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3692 return -E1000_ERR_NVM;
3694 /* Must convert offset into bytes. */
/* Delegate to the 32-bit flash read helper (valid on pch_spt only). */
3697 return e1000_read_flash_data32_ich8lan(hw, offset, data);
3701 * e1000_read_flash_byte_ich8lan - Read byte from flash
3702 * @hw: pointer to the HW structure
3703 * @offset: The offset of the byte to read.
3704 * @data: Pointer to a byte to store the value read.
3706 * Reads a single byte from the NVM using the flash access registers.
3708 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* SPT only supports 32-bit flash register access (see the data32
 * helpers below), so a 1-byte read is rejected on that MAC type.
 */
3714 if (hw->mac.type == e1000_pch_spt)
3715 return -E1000_ERR_NVM;
3716 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3723 return E1000_SUCCESS;
3727 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3728 * @hw: pointer to the HW structure
3729 * @offset: The offset (in bytes) of the byte or word to read.
3730 * @size: Size of data to read, 1=byte 2=word
3731 * @data: Pointer to the word to store the value read.
3733 * Reads a byte or word from the NVM using the flash access registers.
3735 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3738 union ich8_hws_flash_status hsfsts;
3739 union ich8_hws_flash_ctrl hsflctl;
3740 u32 flash_linear_addr;
3742 s32 ret_val = -E1000_ERR_NVM;
3745 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Only 1- or 2-byte transfers are supported by this helper. */
3747 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3748 return -E1000_ERR_NVM;
/* Linear flash address = byte offset within the region + NVM base. */
3749 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3750 hw->nvm.flash_base_addr);
3755 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3756 if (ret_val != E1000_SUCCESS)
3758 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3760 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3761 hsflctl.hsf_ctrl.fldbcount = size - 1;
3762 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3763 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3764 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3766 ret_val = e1000_flash_cycle_ich8lan(hw,
3767 ICH_FLASH_READ_COMMAND_TIMEOUT);
3769 /* Check if FCERR is set to 1, if set to 1, clear it
3770 * and try the whole sequence a few more times, else
3771 * read in (shift in) the Flash Data0, the order is
3772 * least significant byte first msb to lsb
3774 if (ret_val == E1000_SUCCESS) {
3775 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3777 *data = (u8)(flash_data & 0x000000FF);
3779 *data = (u16)(flash_data & 0x0000FFFF);
3782 /* If we've gotten here, then things are probably
3783 * completely hosed, but if the error condition is
3784 * detected, it won't hurt to give it another try...
3785 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3787 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3789 if (hsfsts.hsf_status.flcerr) {
3790 /* Repeat for some time before giving up. */
3792 } else if (!hsfsts.hsf_status.flcdone) {
3793 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3797 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3803 * e1000_read_flash_data32_ich8lan - Read dword from NVM
3804 * @hw: pointer to the HW structure
3805 * @offset: The offset (in bytes) of the byte or word to read.
3807 * @data: Pointer to the dword to store the value read.
3809 * Reads a dword from the NVM using the flash access registers.
3811 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3814 union ich8_hws_flash_status hsfsts;
3815 union ich8_hws_flash_ctrl hsflctl;
3816 u32 flash_linear_addr;
3817 s32 ret_val = -E1000_ERR_NVM;
3820 DEBUGFUNC("e1000_read_flash_data32_ich8lan");
/* Dword access is only implemented for pch_spt hardware. */
3822 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3823 hw->mac.type != e1000_pch_spt)
3824 return -E1000_ERR_NVM;
3826 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3827 hw->nvm.flash_base_addr);
3832 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3833 if (ret_val != E1000_SUCCESS)
/* On SPT, HSFCTL is the upper half of the 32-bit HSFSTS register. */
3835 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
3837 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
/* fldbcount = sizeof(int32_t) - 1 = 3 -> a 4-byte transfer. */
3838 hsflctl.hsf_ctrl.fldbcount = sizeof(int32_t) - 1;
3839 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3840 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3841 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3843 ret_val = e1000_flash_cycle_ich8lan(hw,
3844 ICH_FLASH_READ_COMMAND_TIMEOUT);
3846 /* Check if FCERR is set to 1, if set to 1, clear it
3847 * and try the whole sequence a few more times, else
3848 * read in (shift in) the Flash Data0, the order is
3849 * least significant byte first msb to lsb
3851 if (ret_val == E1000_SUCCESS) {
3852 *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3855 /* If we've gotten here, then things are probably
3856 * completely hosed, but if the error condition is
3857 * detected, it won't hurt to give it another try...
3858 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3860 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3862 if (hsfsts.hsf_status.flcerr) {
3863 /* Repeat for some time before giving up. */
3865 } else if (!hsfsts.hsf_status.flcdone) {
3866 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3870 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3877 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
3878 * @hw: pointer to the HW structure
3879 * @offset: The offset (in bytes) of the word(s) to write.
3880 * @words: Size of data to write in words
3881 * @data: Pointer to the word(s) to write at offset.
3883 * Writes a byte or word to the NVM using the flash access registers.
3885 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3888 struct e1000_nvm_info *nvm = &hw->nvm;
3889 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3892 DEBUGFUNC("e1000_write_nvm_ich8lan");
3894 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3896 DEBUGOUT("nvm parameter(s) out of bounds\n");
3897 return -E1000_ERR_NVM;
3900 nvm->ops.acquire(hw);
/* Writes are only staged in the shadow RAM here; the flash itself is
 * committed later by the update_nvm_checksum path, which scans for
 * entries with .modified set.
 */
3902 for (i = 0; i < words; i++) {
3903 dev_spec->shadow_ram[offset+i].modified = TRUE;
3904 dev_spec->shadow_ram[offset+i].value = data[i];
3907 nvm->ops.release(hw);
3909 return E1000_SUCCESS;
3913 * e1000_update_nvm_checksum_spt - Update the checksum for NVM
3914 * @hw: pointer to the HW structure
3916 * The NVM checksum is updated by calling the generic update_nvm_checksum,
3917 * which writes the checksum to the shadow ram. The changes in the shadow
3918 * ram are then committed to the EEPROM by processing each bank at a time
3919 * checking for the modified bit and writing only the pending changes.
3920 * After a successful commit, the shadow ram is cleared and is ready for
3923 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3925 struct e1000_nvm_info *nvm = &hw->nvm;
3926 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3927 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3931 DEBUGFUNC("e1000_update_nvm_checksum_spt");
3933 ret_val = e1000_update_nvm_checksum_generic(hw);
3937 if (nvm->type != e1000_nvm_flash_sw)
3940 nvm->ops.acquire(hw);
3942 /* We're writing to the opposite bank so if we're on bank 1,
3943 * write to bank 0 etc. We also need to erase the segment that
3944 * is going to be written
3946 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3947 if (ret_val != E1000_SUCCESS) {
3948 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3953 new_bank_offset = nvm->flash_bank_size;
3954 old_bank_offset = 0;
3955 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3959 old_bank_offset = nvm->flash_bank_size;
3960 new_bank_offset = 0;
3961 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
/* SPT path: commit two 16-bit shadow words per 32-bit flash write,
 * hence the i += 2 stride.
 */
3965 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
3966 /* Determine whether to write the value stored
3967 * in the other NVM bank or a modified value stored
3970 ret_val = e1000_read_flash_dword_ich8lan(hw,
3971 i + old_bank_offset,
3973 if (dev_spec->shadow_ram[i].modified) {
3974 data32 &= 0xFFFF0000;
3975 data32 |= dev_spec->shadow_ram[i].value & 0xffff;
3977 if (dev_spec->shadow_ram[i + 1].modified) {
3978 data32 &= 0x0000FFFF;
3979 data32 |= (dev_spec->shadow_ram[i + 1].value & 0xffff)
3985 /* If the word is 0x13, then make sure the signature bits
3986 * (15:14) are 11b until the commit has completed.
3987 * This will allow us to write 10b which indicates the
3988 * signature is valid. We want to do this after the write
3989 * has completed so that we don't mark the segment valid
3990 * while the write is still in progress
3992 if (i == E1000_ICH_NVM_SIG_WORD - 1)
3993 data32 |= E1000_ICH_NVM_SIG_MASK << 16;
3995 /* Convert offset to bytes. */
3996 /*act_offset = (i + new_bank_offset) << 1;*/
4000 /* Write the bytes to the new bank. */
4001 act_offset = i + new_bank_offset;
4002 ret_val = e1000_retry_write_flash_dword_ich8lan(hw,
4009 /* Don't bother writing the segment valid bits if sector
4010 * programming failed.
4013 DEBUGOUT("Flash commit failed.\n");
4017 /* Finally validate the new segment by setting bit 15:14
4018 * to 10b in word 0x13 , this can be done without an
4019 * erase as well since these bits are 11 to start with
4020 * and we need to change bit 14 to 0b
4022 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4024 /*offset in words but we read dword */
4027 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &data32);
/* Clear bit 30 (bit 14 of the signature word, in the dword's upper
 * half): flips 11b -> 10b to mark the new segment valid.
 */
4031 data32 &= 0xBFFFFFFF;
4032 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset * 2 + 1,
4037 /* And invalidate the previously valid segment by setting
4038 * its signature word (0x13) high_byte to 0b. This can be
4039 * done without an erase because flash erase sets all bits
4040 * to 1's. We can write 1's to 0's without an erase
4042 /*act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;*/
4044 /* offset in words but we read dwords */
4045 act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4046 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &data32);
4051 /* Great! Everything worked, we can now clear the cached entries. */
4052 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4053 dev_spec->shadow_ram[i].modified = FALSE;
4054 dev_spec->shadow_ram[i].value = 0xFFFF;
4058 nvm->ops.release(hw);
4060 /* Reload the EEPROM, or else modifications will not appear
4061 * until after the next adapter reset.
4064 nvm->ops.reload(hw);
4070 DEBUGOUT1("NVM update error: %d\n", ret_val);
4076 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4077 * @hw: pointer to the HW structure
4079 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4080 * which writes the checksum to the shadow ram. The changes in the shadow
4081 * ram are then committed to the EEPROM by processing each bank at a time
4082 * checking for the modified bit and writing only the pending changes.
4083 * After a successful commit, the shadow ram is cleared and is ready for
4086 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4088 struct e1000_nvm_info *nvm = &hw->nvm;
4089 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4090 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4094 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4096 ret_val = e1000_update_nvm_checksum_generic(hw);
4100 if (nvm->type != e1000_nvm_flash_sw)
4103 nvm->ops.acquire(hw);
4105 /* We're writing to the opposite bank so if we're on bank 1,
4106 * write to bank 0 etc. We also need to erase the segment that
4107 * is going to be written
4109 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4110 if (ret_val != E1000_SUCCESS) {
4111 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4116 new_bank_offset = nvm->flash_bank_size;
4117 old_bank_offset = 0;
4118 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4122 old_bank_offset = nvm->flash_bank_size;
4123 new_bank_offset = 0;
4124 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
/* Pre-SPT path: commit one 16-bit word at a time, each word written
 * as two byte accesses (see the two retry_write_flash_byte calls).
 */
4128 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4129 if (dev_spec->shadow_ram[i].modified) {
4130 data = dev_spec->shadow_ram[i].value;
4132 ret_val = e1000_read_flash_word_ich8lan(hw, i +
4138 /* If the word is 0x13, then make sure the signature bits
4139 * (15:14) are 11b until the commit has completed.
4140 * This will allow us to write 10b which indicates the
4141 * signature is valid. We want to do this after the write
4142 * has completed so that we don't mark the segment valid
4143 * while the write is still in progress
4145 if (i == E1000_ICH_NVM_SIG_WORD)
4146 data |= E1000_ICH_NVM_SIG_MASK;
4148 /* Convert offset to bytes. */
4149 act_offset = (i + new_bank_offset) << 1;
4153 /* Write the bytes to the new bank. */
4154 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4161 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4168 /* Don't bother writing the segment valid bits if sector
4169 * programming failed.
4172 DEBUGOUT("Flash commit failed.\n");
4176 /* Finally validate the new segment by setting bit 15:14
4177 * to 10b in word 0x13 , this can be done without an
4178 * erase as well since these bits are 11 to start with
4179 * and we need to change bit 14 to 0b
4181 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4182 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
/* act_offset * 2 + 1: byte address of the signature word's high byte. */
4187 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4192 /* And invalidate the previously valid segment by setting
4193 * its signature word (0x13) high_byte to 0b. This can be
4194 * done without an erase because flash erase sets all bits
4195 * to 1's. We can write 1's to 0's without an erase
4197 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4199 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4204 /* Great! Everything worked, we can now clear the cached entries. */
4205 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4206 dev_spec->shadow_ram[i].modified = FALSE;
4207 dev_spec->shadow_ram[i].value = 0xFFFF;
4211 nvm->ops.release(hw);
4213 /* Reload the EEPROM, or else modifications will not appear
4214 * until after the next adapter reset.
4217 nvm->ops.reload(hw);
4223 DEBUGOUT1("NVM update error: %d\n", ret_val);
4229 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4230 * @hw: pointer to the HW structure
4232 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4233 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
4234 * calculated, in which case we need to calculate the checksum and set bit 6.
4236 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4241 u16 valid_csum_mask;
4243 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4245 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4246 * the checksum needs to be fixed. This bit is an indication that
4247 * the NVM was prepared by OEM software and did not calculate
4248 * the checksum...a likely scenario.
/* The word and bit holding the valid-checksum flag differ by MAC
 * generation, so select them by hw->mac.type.
 */
4250 switch (hw->mac.type) {
4254 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4257 word = NVM_FUTURE_INIT_WORD1;
4258 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4262 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
/* If the valid-checksum bit is clear, set it and commit the NVM so
 * the generic checksum validation below can succeed.
 */
4266 if (!(data & valid_csum_mask)) {
4267 data |= valid_csum_mask;
4268 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4271 ret_val = hw->nvm.ops.update(hw);
4276 return e1000_validate_nvm_checksum_generic(hw);
4280 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4281 * @hw: pointer to the HW structure
4282 * @offset: The offset (in bytes) of the byte/word to read.
4283 * @size: Size of data to read, 1=byte 2=word
4284 * @data: The byte(s) to write to the NVM.
4286 * Writes one/two bytes to the NVM using the flash access registers.
4288 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4291 union ich8_hws_flash_status hsfsts;
4292 union ich8_hws_flash_ctrl hsflctl;
4293 u32 flash_linear_addr;
4298 DEBUGFUNC("e1000_write_ich8_data");
/* SPT accepts only full 4-byte writes; earlier parts accept 1 or 2. */
4300 if (hw->mac.type == e1000_pch_spt) {
4301 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4302 return -E1000_ERR_NVM;
4304 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4305 return -E1000_ERR_NVM;
4308 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4309 hw->nvm.flash_base_addr);
4314 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4315 if (ret_val != E1000_SUCCESS)
4317 if (hw->mac.type == e1000_pch_spt)
4318 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4320 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4322 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4323 hsflctl.hsf_ctrl.fldbcount = size - 1;
4324 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4325 if (hw->mac.type == e1000_pch_spt)
4326 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4328 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
4330 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
/* Mask the payload down to the requested transfer width. */
4333 flash_data = (u32)data & 0x00FF;
4335 flash_data = (u32)data;
4337 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4339 /* check if FCERR is set to 1 , if set to 1, clear it
4340 * and try the whole sequence a few more times else done
4343 e1000_flash_cycle_ich8lan(hw,
4344 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4345 if (ret_val == E1000_SUCCESS)
4348 /* If we're here, then things are most likely
4349 * completely hosed, but if the error condition
4350 * is detected, it won't hurt to give it another
4351 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4353 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4354 if (hsfsts.hsf_status.flcerr)
4355 /* Repeat for some time before giving up. */
4357 if (!hsfsts.hsf_status.flcdone) {
4358 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4361 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4367 * e1000_write_flash_data32_ich8lan - Writes 32-bit words to the NVM
4368 * @hw: pointer to the HW structure
4369 * @offset: The offset (in bytes) of the 32-bit word to read.
4370 * @data: The byte(s) to write to the NVM.
4372 * Writes a single dword to the NVM using the flash access registers.
4374 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4377 union ich8_hws_flash_status hsfsts;
4378 union ich8_hws_flash_ctrl hsflctl;
4379 u32 flash_linear_addr;
4383 DEBUGFUNC("e1000_write_ich8_data");
4385 if (hw->mac.type == e1000_pch_spt) {
4386 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4387 return -E1000_ERR_NVM;
4389 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4390 hw->nvm.flash_base_addr);
4395 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4396 if (ret_val != E1000_SUCCESS)
4398 if (hw->mac.type == e1000_pch_spt) {
4399 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4401 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4404 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
/* fldbcount = sizeof(int32_t) - 1 = 3 -> a 4-byte transfer. */
4405 hsflctl.hsf_ctrl.fldbcount = sizeof(int32_t) - 1;
4406 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4408 /* In SPT, This register is in Lan memory space,
4409 * not flash. Therefore, only 32 bit access is
4412 if (hw->mac.type == e1000_pch_spt) {
4413 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4415 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
4418 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4420 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4422 /* check if FCERR is set to 1 , if set to 1, clear it
4423 * and try the whole sequence a few more times else done
4426 e1000_flash_cycle_ich8lan(hw,
4427 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4428 if (ret_val == E1000_SUCCESS)
4431 /* If we're here, then things are most likely
4432 * completely hosed, but if the error condition
4433 * is detected, it won't hurt to give it another
4434 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4436 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4437 if (hsfsts.hsf_status.flcerr)
4438 /* Repeat for some time before giving up. */
4440 if (!hsfsts.hsf_status.flcdone) {
4441 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4444 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4451 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4452 * @hw: pointer to the HW structure
4453 * @offset: The index of the byte to read.
4454 * @data: The byte to write to the NVM.
4456 * Writes a single byte to the NVM using the flash access registers.
4458 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Widen the byte to u16 to match the data helper's parameter type. */
4461 u16 word = (u16)data;
4463 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
/* size = 1 -> single-byte flash write. */
4465 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4469 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4470 * @hw: pointer to the HW structure
4471 * @offset: The offset of the byte to write.
4472 * @byte: The byte to write to the NVM.
4474 * Writes a single byte to the NVM using the flash access registers.
4475 * Goes through a retry algorithm before giving up.
4477 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4478 u32 offset, u8 byte)
4481 u16 program_retries;
4483 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4485 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
/* Up to 100 retries before declaring the flash write failed. */
4489 for (program_retries = 0; program_retries < 100; program_retries++) {
4490 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4492 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4493 if (ret_val == E1000_SUCCESS)
4496 if (program_retries == 100)
4497 return -E1000_ERR_NVM;
4499 return E1000_SUCCESS;
4503 * e1000_retry_write_flash_dword_ich8lan - Writes a 32-bit word to NVM
4504 * @hw: pointer to the HW structure
4505 * @offset: The offset of the byte to write.
4506 * @dword: The dword to write to the NVM.
4508 * Writes a single 32-bit word to the NVM using the flash access registers.
4509 * Goes through a retry algorithm before giving up.
4511 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4512 u32 offset, u32 dword)
4515 u16 program_retries;
4517 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4519 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4523 for (program_retries = 0; program_retries < 100; program_retries++) {
4524 DEBUGOUT2("Retrying DWord %08X at offset %u\n", dword, offset);
4526 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4527 if (ret_val == E1000_SUCCESS)
4530 if (program_retries == 100)
4531 return -E1000_ERR_NVM;
4533 return E1000_SUCCESS;
4537 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4538 * @hw: pointer to the HW structure
4539 * @bank: 0 for first bank, 1 for second bank, etc.
4541 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4542 * bank N is 4096 * N + flash_reg_addr.
4544 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4546 struct e1000_nvm_info *nvm = &hw->nvm;
4547 union ich8_hws_flash_status hsfsts;
4548 union ich8_hws_flash_ctrl hsflctl;
4549 u32 flash_linear_addr;
4550 /* bank size is in 16bit words - adjust to bytes */
4551 u32 flash_bank_size = nvm->flash_bank_size * 2;
4554 s32 j, iteration, sector_size;
4556 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4558 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4560 /* Determine HW Sector size: Read BERASE bits of hw flash status
4562 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4563 * consecutive sectors. The start index for the nth Hw sector
4564 * can be calculated as = bank * 4096 + n * 256
4565 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4566 * The start index for the nth Hw sector can be calculated
4568 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4569 * (ich9 only, otherwise error condition)
4570 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4572 switch (hsfsts.hsf_status.berasesz) {
4574 /* Hw sector size 256 */
4575 sector_size = ICH_FLASH_SEG_SIZE_256;
4576 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4579 sector_size = ICH_FLASH_SEG_SIZE_4K;
4583 sector_size = ICH_FLASH_SEG_SIZE_8K;
4587 sector_size = ICH_FLASH_SEG_SIZE_64K;
/* Unknown BERASE encoding: refuse rather than erase blindly. */
4591 return -E1000_ERR_NVM;
4594 /* Start with the base address, then add the sector offset. */
4595 flash_linear_addr = hw->nvm.flash_base_addr;
4596 flash_linear_addr += (bank) ? flash_bank_size : 0;
4598 for (j = 0; j < iteration; j++) {
4600 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4603 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4607 /* Write a value 11 (block Erase) in Flash
4608 * Cycle field in hw flash control
/* SPT keeps HSFCTL in the upper 16 bits of HSFSTS (32-bit access). */
4610 if (hw->mac.type == e1000_pch_spt)
4612 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4615 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4617 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4618 if (hw->mac.type == e1000_pch_spt)
4619 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4620 hsflctl.regval << 16);
4622 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4625 /* Write the last 24 bits of an index within the
4626 * block into Flash Linear address field in Flash
4629 flash_linear_addr += (j * sector_size);
4630 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4633 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4634 if (ret_val == E1000_SUCCESS)
4637 /* Check if FCERR is set to 1. If 1,
4638 * clear it and try the whole sequence
4639 * a few more times else Done
4641 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4643 if (hsfsts.hsf_status.flcerr)
4644 /* repeat for some time before giving up */
4646 else if (!hsfsts.hsf_status.flcdone)
4648 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4651 return E1000_SUCCESS;
4655 * e1000_valid_led_default_ich8lan - Set the default LED settings
4656 * @hw: pointer to the HW structure
4657 * @data: Pointer to the LED settings
4659 * Reads the LED default settings from the NVM to data. If the NVM LED
4660 * settings is all 0's or F's, set the LED default to a valid LED default
4663 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4667 DEBUGFUNC("e1000_valid_led_default_ich8lan");
4669 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4671 DEBUGOUT("NVM Read Error\n");
/* All-zeros / all-ones settings are reserved values; substitute the
 * ICH8LAN default LED configuration.
 */
4675 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4676 *data = ID_LED_DEFAULT_ICH8LAN;
4678 return E1000_SUCCESS;
4682 * e1000_id_led_init_pchlan - store LED configurations
4683 * @hw: pointer to the HW structure
4685 * PCH does not control LEDs via the LEDCTL register, rather it uses
4686 * the PHY LED configuration register.
4688 * PCH also does not have an "always on" or "always off" mode which
4689 * complicates the ID feature. Instead of using the "on" mode to indicate
4690 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4691 * use "link_up" mode. The LEDs will still ID on request if there is no
4692 * link based on logic in e1000_led_[on|off]_pchlan().
4694 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4696 struct e1000_mac_info *mac = &hw->mac;
4698 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4699 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4700 u16 data, i, temp, shift;
4702 DEBUGFUNC("e1000_id_led_init_pchlan");
4704 /* Get default ID LED modes */
4705 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4709 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4710 mac->ledctl_mode1 = mac->ledctl_default;
4711 mac->ledctl_mode2 = mac->ledctl_default;
/* Each of the four LEDs is configured by a 4-bit nibble of the NVM
 * word; (i << 2) selects the nibble for LED i.
 */
4713 for (i = 0; i < 4; i++) {
4714 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4717 case ID_LED_ON1_DEF2:
4718 case ID_LED_ON1_ON2:
4719 case ID_LED_ON1_OFF2:
4720 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4721 mac->ledctl_mode1 |= (ledctl_on << shift);
4723 case ID_LED_OFF1_DEF2:
4724 case ID_LED_OFF1_ON2:
4725 case ID_LED_OFF1_OFF2:
4726 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4727 mac->ledctl_mode1 |= (ledctl_off << shift);
/* mode2 mirrors the same decode for the second ("2") LED setting. */
4734 case ID_LED_DEF1_ON2:
4735 case ID_LED_ON1_ON2:
4736 case ID_LED_OFF1_ON2:
4737 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4738 mac->ledctl_mode2 |= (ledctl_on << shift);
4740 case ID_LED_DEF1_OFF2:
4741 case ID_LED_ON1_OFF2:
4742 case ID_LED_OFF1_OFF2:
4743 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4744 mac->ledctl_mode2 |= (ledctl_off << shift);
4752 return E1000_SUCCESS;
4756 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4757 * @hw: pointer to the HW structure
4759 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4760 * register, so the bus width is hard coded.
4762 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4764 struct e1000_bus_info *bus = &hw->bus;
4767 DEBUGFUNC("e1000_get_bus_info_ich8lan");
4769 ret_val = e1000_get_bus_info_pcie_generic(hw);
4771 /* ICH devices are "PCI Express"-ish. They have
4772 * a configuration space, but do not contain
4773 * PCI Express Capability registers, so bus width
4774 * must be hardcoded.
/* Default to x1 when the generic PCIe probe could not determine it. */
4776 if (bus->width == e1000_bus_width_unknown)
4777 bus->width = e1000_bus_width_pcie_x1;
4783 * e1000_reset_hw_ich8lan - Reset the hardware
4784 * @hw: pointer to the HW structure
4786 * Does a full reset of the hardware which includes a reset of the PHY and
4789 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4791 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4796 DEBUGFUNC("e1000_reset_hw_ich8lan");
4798 /* Prevent the PCI-E bus from sticking if there is no TLP connection
4799 * on the last TLP read/write transaction when MAC is reset.
4801 ret_val = e1000_disable_pcie_master_generic(hw);
4803 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4805 DEBUGOUT("Masking off all interrupts\n");
4806 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4808 /* Disable the Transmit and Receive units. Then delay to allow
4809 * any pending transactions to complete before we hit the MAC
4810 * with the global reset.
4812 E1000_WRITE_REG(hw, E1000_RCTL, 0);
4813 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4814 E1000_WRITE_FLUSH(hw);
4818 /* Workaround for ICH8 bit corruption issue in FIFO memory */
4819 if (hw->mac.type == e1000_ich8lan) {
4820 /* Set Tx and Rx buffer allocation to 8k apiece. */
4821 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4822 /* Set Packet Buffer Size to 16k. */
4823 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
/* PCH: remember the NVM's K1 setting so it can be restored after the
 * PHY is reconfigured post-reset.
 */
4826 if (hw->mac.type == e1000_pchlan) {
4827 /* Save the NVM K1 bit setting*/
4828 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4832 if (kum_cfg & E1000_NVM_K1_ENABLE)
4833 dev_spec->nvm_k1_enabled = TRUE;
4835 dev_spec->nvm_k1_enabled = FALSE;
4838 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4840 if (!hw->phy.ops.check_reset_block(hw)) {
4841 /* Full-chip reset requires MAC and PHY reset at the same
4842 * time to make sure the interface between MAC and the
4843 * external PHY is reset.
4845 ctrl |= E1000_CTRL_PHY_RST;
4847 /* Gate automatic PHY configuration by hardware on
4850 if ((hw->mac.type == e1000_pch2lan) &&
4851 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4852 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
4854 ret_val = e1000_acquire_swflag_ich8lan(hw);
4855 DEBUGOUT("Issuing a global reset to ich8lan\n");
4856 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4857 /* cannot issue a flush here because it hangs the hardware */
4860 /* Set Phy Config Counter to 50msec */
4861 if (hw->mac.type == e1000_pch2lan) {
4862 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4863 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4864 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4865 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
/* If the PHY was reset along with the MAC, wait for its config to
 * complete and run the post-PHY-reset workarounds.
 */
4868 if (ctrl & E1000_CTRL_PHY_RST) {
4869 ret_val = hw->phy.ops.get_cfg_done(hw);
4873 ret_val = e1000_post_phy_reset_ich8lan(hw);
4878 /* For PCH, this write will make sure that any noise
4879 * will be detected as a CRC error and be dropped rather than show up
4880 * as a bad packet to the DMA engine.
4882 if (hw->mac.type == e1000_pchlan)
4883 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Mask and clear any interrupt events raised during the reset. */
4885 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4886 E1000_READ_REG(hw, E1000_ICR);
4888 reg = E1000_READ_REG(hw, E1000_KABGTXD);
4889 reg |= E1000_KABGTXD_BGSQLBIAS;
4890 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4892 return E1000_SUCCESS;
4896 * e1000_init_hw_ich8lan - Initialize the hardware
4897 * @hw: pointer to the HW structure
4899 * Prepares the hardware for transmit and receive by doing the following:
4900 * - initialize hardware bits
4901 * - initialize LED identification
4902 * - setup receive address registers
4903 * - setup flow control
4904 * - setup transmit descriptors
4905 * - clear statistics
4907 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4909 struct e1000_mac_info *mac = &hw->mac;
4910 u32 ctrl_ext, txdctl, snoop;
4914 DEBUGFUNC("e1000_init_hw_ich8lan");
4916 e1000_initialize_hw_bits_ich8lan(hw);
4918 /* Initialize identification LED */
4919 ret_val = mac->ops.id_led_init(hw);
4920 /* An error is not fatal and we should not stop init due to this */
4922 DEBUGOUT("Error initializing identification LED\n");
4924 /* Setup the receive address. */
4925 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4927 /* Zero out the Multicast HASH table */
4928 DEBUGOUT("Zeroing the MTA\n");
4929 for (i = 0; i < mac->mta_reg_count; i++)
4930 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4932 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4933 * the ME. Disable wakeup by clearing the host wakeup bit.
4934 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4936 if (hw->phy.type == e1000_phy_82578) {
4937 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4938 i &= ~BM_WUC_HOST_WU_BIT;
4939 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4940 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4945 /* Setup link and flow control */
4946 ret_val = mac->ops.setup_link(hw);
4948 /* Set the transmit descriptor write-back policy for both queues */
/* Queue 0: full descriptor write-back, max descriptor prefetch. */
4949 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4950 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4951 E1000_TXDCTL_FULL_TX_DESC_WB);
4952 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4953 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4954 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/* Queue 1: same write-back/prefetch policy as queue 0. */
4955 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4956 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4957 E1000_TXDCTL_FULL_TX_DESC_WB);
4958 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4959 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4960 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4962 /* ICH8 has opposite polarity of no_snoop bits.
4963 * By default, we should use snoop behavior.
4965 if (mac->type == e1000_ich8lan)
4966 snoop = PCIE_ICH8_SNOOP_ALL;
4968 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4969 e1000_set_pcie_no_snoop_generic(hw, snoop);
/* Disable relaxed ordering on PCIe completions. */
4971 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4972 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4973 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4975 /* Clear all of the statistics registers (clear on read). It is
4976 * important that we do this after we have tried to establish link
4977 * because the symbol error count will increment wildly if there
4980 e1000_clear_hw_cntrs_ich8lan(hw);
4986 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4987 * @hw: pointer to the HW structure
4989 * Sets/Clears required hardware bits necessary for correctly setting up the
4990 * hardware for transmit and receive.
4992 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4996 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4998 /* Extended Device Control */
4999 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5001 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5002 if (hw->mac.type >= e1000_pchlan)
5003 reg |= E1000_CTRL_EXT_PHYPDEN;
5004 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5006 /* Transmit Descriptor Control 0 */
5007 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5009 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5011 /* Transmit Descriptor Control 1 */
5012 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5014 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5016 /* Transmit Arbitration Control 0 */
/* NOTE(review): the raw (1 << n) bit numbers below are undocumented
 * TARC tuning bits carried over from Intel shared code; meanings are
 * defined in the hardware specification, not in this file.
 */
5017 reg = E1000_READ_REG(hw, E1000_TARC(0));
5018 if (hw->mac.type == e1000_ich8lan)
5019 reg |= (1 << 28) | (1 << 29);
5020 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5021 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5023 /* Transmit Arbitration Control 1 */
5024 reg = E1000_READ_REG(hw, E1000_TARC(1));
5025 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5029 reg |= (1 << 24) | (1 << 26) | (1 << 30);
5030 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
/* Device Status: ICH8-specific adjustment. */
5033 if (hw->mac.type == e1000_ich8lan) {
5034 reg = E1000_READ_REG(hw, E1000_STATUS);
5036 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5039 /* work-around descriptor data corruption issue during nfs v2 udp
5040 * traffic, just disable the nfs filtering capability
5042 reg = E1000_READ_REG(hw, E1000_RFCTL);
5043 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5045 /* Disable IPv6 extension header parsing because some malformed
5046 * IPv6 headers can hang the Rx.
5048 if (hw->mac.type == e1000_ich8lan)
5049 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5050 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5052 /* Enable ECC on Lynxpoint */
5053 if (hw->mac.type == e1000_pch_lpt ||
5054 hw->mac.type == e1000_pch_spt) {
5055 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5056 reg |= E1000_PBECCSTS_ECC_ENABLE;
5057 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
/* MEHE: machine/memory error handling enable alongside packet
 * buffer ECC on LPT/SPT parts.
 */
5059 reg = E1000_READ_REG(hw, E1000_CTRL);
5060 reg |= E1000_CTRL_MEHE;
5061 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5068 * e1000_setup_link_ich8lan - Setup flow control and link settings
5069 * @hw: pointer to the HW structure
5071 * Determines which flow control settings to use, then configures flow
5072 * control. Calls the appropriate media-specific link configuration
5073 * function. Assuming the adapter has a valid link partner, a valid link
5074 * should be established. Assumes the hardware has previously been reset
5075 * and the transmitter and receiver are not enabled.
5077 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5081 DEBUGFUNC("e1000_setup_link_ich8lan");
/* If a manageability/firmware reset block is in place, leave the link
 * configuration alone and report success.
 */
5083 if (hw->phy.ops.check_reset_block(hw))
5084 return E1000_SUCCESS;
5086 /* ICH parts do not have a word in the NVM to determine
5087 * the default flow control setting, so we explicitly
5090 if (hw->fc.requested_mode == e1000_fc_default)
5091 hw->fc.requested_mode = e1000_fc_full;
5093 /* Save off the requested flow control mode for use later. Depending
5094 * on the link partner's capabilities, we may or may not use this mode.
5096 hw->fc.current_mode = hw->fc.requested_mode;
5098 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5099 hw->fc.current_mode);
5101 /* Continue to configure the copper link. */
5102 ret_val = hw->mac.ops.setup_physical_interface(hw);
5106 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
/* PCH-class PHYs keep the flow-control refresh timer in FCRTV_PCH and
 * need the pause time mirrored into a PHY register as well.
 */
5107 if ((hw->phy.type == e1000_phy_82578) ||
5108 (hw->phy.type == e1000_phy_82579) ||
5109 (hw->phy.type == e1000_phy_i217) ||
5110 (hw->phy.type == e1000_phy_82577)) {
5111 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5113 ret_val = hw->phy.ops.write_reg(hw,
5114 PHY_REG(BM_PORT_CTRL_PAGE, 27),
5120 return e1000_set_fc_watermarks_generic(hw);
5124 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5125 * @hw: pointer to the HW structure
5127 * Configures the kumeran interface to the PHY to wait the appropriate time
5128 * when polling the PHY, then call the generic setup_copper_link to finish
5129 * configuring the copper link.
5131 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5137 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Set link up and let speed/duplex come from auto-negotiation rather
 * than being forced.
 */
5139 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5140 ctrl |= E1000_CTRL_SLU;
5141 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5142 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5144 /* Set the mac to wait the maximum time between each iteration
5145 * and increase the max iterations when polling the phy;
5146 * this fixes erroneous timeouts at 10Mbps.
5148 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
/* Read-modify-write of the Kumeran inband parameter register. */
5152 ret_val = e1000_read_kmrn_reg_generic(hw,
5153 E1000_KMRNCTRLSTA_INBAND_PARAM,
5158 ret_val = e1000_write_kmrn_reg_generic(hw,
5159 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* PHY-specific copper setup; the default branch (elsewhere handling
 * e1000_phy_ife) configures MDI/MDI-X below.
 */
5164 switch (hw->phy.type) {
5165 case e1000_phy_igp_3:
5166 ret_val = e1000_copper_link_setup_igp(hw);
5171 case e1000_phy_82578:
5172 ret_val = e1000_copper_link_setup_m88(hw);
5176 case e1000_phy_82577:
5177 case e1000_phy_82579:
5178 ret_val = e1000_copper_link_setup_82577(hw);
5183 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
/* Clear auto-MDIX, then apply the requested crossover mode. */
5188 reg_data &= ~IFE_PMC_AUTO_MDIX;
5190 switch (hw->phy.mdix) {
5192 reg_data &= ~IFE_PMC_FORCE_MDIX;
5195 reg_data |= IFE_PMC_FORCE_MDIX;
5199 reg_data |= IFE_PMC_AUTO_MDIX;
5202 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5211 return e1000_setup_copper_link_generic(hw);
5215 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5216 * @hw: pointer to the HW structure
5218 * Calls the PHY specific link setup function and then calls the
5219 * generic setup_copper_link to finish configuring the link for
5220 * Lynxpoint PCH devices
5222 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5227 DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
/* Set link up; clear forced speed/duplex so auto-negotiation applies. */
5229 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5230 ctrl |= E1000_CTRL_SLU;
5231 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5232 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
/* LPT-class parts always use the 82577-style PHY setup. */
5234 ret_val = e1000_copper_link_setup_82577(hw);
5238 return e1000_setup_copper_link_generic(hw);
5242 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5243 * @hw: pointer to the HW structure
5244 * @speed: pointer to store current link speed
5245 * @duplex: pointer to store the current link duplex
5247 * Calls the generic get_speed_and_duplex to retrieve the current link
5248 * information and then calls the Kumeran lock loss workaround for links at
5251 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5256 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5258 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* Kumeran lock-loss workaround applies only to ICH8 with the IGP3
 * PHY running at gigabit speed.
 */
5262 if ((hw->mac.type == e1000_ich8lan) &&
5263 (hw->phy.type == e1000_phy_igp_3) &&
5264 (*speed == SPEED_1000)) {
5265 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5272 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5273 * @hw: pointer to the HW structure
5275 * Work-around for 82566 Kumeran PCS lock loss:
5276 * On link status change (i.e. PCI reset, speed change) and link is up and
5278 * 0) if workaround is optionally disabled do nothing
5279 * 1) wait 1ms for Kumeran link to come up
5280 * 2) check Kumeran Diagnostic register PCS lock loss bit
5281 * 3) if not set the link is locked (all is good), otherwise...
5283 * 5) repeat up to 10 times
5284 * Note: this is only called for IGP3 copper when speed is 1gb.
5286 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5288 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5294 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
/* Workaround can be disabled via dev_spec; in that case do nothing. */
5296 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5297 return E1000_SUCCESS;
5299 /* Make sure link is up before proceeding. If not just return.
5300 * Attempting this while link is negotiating fouled up link
5303 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5305 return E1000_SUCCESS;
/* Up to 10 attempts: read the diag register twice (first read clears
 * the latched status) and check the PCS lock-loss bit.
 */
5307 for (i = 0; i < 10; i++) {
5308 /* read once to clear */
5309 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5312 /* and again to get new status */
5313 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5317 /* check for PCS lock */
5318 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5319 return E1000_SUCCESS;
5321 /* Issue PHY reset */
5322 hw->phy.ops.reset(hw);
/* All retries exhausted: fall back to disabling gigabit entirely so
 * the link can come up at a lower speed.
 */
5325 /* Disable GigE link negotiation */
5326 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5327 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5328 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5329 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5331 /* Call gig speed drop workaround on Gig disable before accessing
5334 e1000_gig_downshift_workaround_ich8lan(hw);
5336 /* unable to acquire PCS lock */
5337 return -E1000_ERR_PHY;
5341 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5342 * @hw: pointer to the HW structure
5343 * @state: boolean value used to set the current Kumeran workaround state
5345 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
5346 * /disabled - FALSE).
5348 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5351 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5353 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* The Kumeran lock-loss workaround is only relevant on ICH8 silicon;
 * refuse to change the flag on other MAC types.
 */
5355 if (hw->mac.type != e1000_ich8lan) {
5356 DEBUGOUT("Workaround applies to ICH8 only.\n");
5360 dev_spec->kmrn_lock_loss_workaround_enabled = state;
5366 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5367 * @hw: pointer to the HW structure
5369 * Workaround for 82566 power-down on D3 entry:
5370 * 1) disable gigabit link
5371 * 2) write VR power-down enable
5373 * Continue if successful, else issue LCD reset and repeat
5375 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5381 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* Only the IGP3 PHY needs this power-down sequence. */
5383 if (hw->phy.type != e1000_phy_igp_3)
5386 /* Try the workaround twice (if needed) */
5389 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5390 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5391 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5392 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5394 /* Call gig speed drop workaround on Gig disable before
5395 * accessing any PHY registers
5397 if (hw->mac.type == e1000_ich8lan)
5398 e1000_gig_downshift_workaround_ich8lan(hw);
5400 /* Write VR power-down enable */
5401 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5402 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5403 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5404 data | IGP3_VR_CTRL_MODE_SHUTDOWN)
/* Verify the shutdown mode actually latched; give up after the
 * retry to avoid looping forever on a stuck PHY.
 */
5406 /* Read it back and test */
5407 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5408 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5409 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5412 /* Issue PHY reset and repeat at most one more time */
5413 reg = E1000_READ_REG(hw, E1000_CTRL);
5414 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5420 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5421 * @hw: pointer to the HW structure
5423 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5424 * LPLU, Gig disable, MDIC PHY reset):
5425 * 1) Set Kumeran Near-end loopback
5426 * 2) Clear Kumeran Near-end loopback
5427 * Should only be called for ICH8[m] devices with any 1G Phy.
5429 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5434 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
/* Applies only to ICH8 with a gigabit PHY (IFE is 10/100 only). */
5436 if ((hw->mac.type != e1000_ich8lan) ||
5437 (hw->phy.type == e1000_phy_ife))
/* Pulse the Kumeran near-end loopback bit: set it, then clear it. */
5440 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5444 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5445 ret_val = e1000_write_kmrn_reg_generic(hw,
5446 E1000_KMRNCTRLSTA_DIAG_OFFSET,
5450 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5451 e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5456 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5457 * @hw: pointer to the HW structure
5459 * During S0 to Sx transition, it is possible the link remains at gig
5460 * instead of negotiating to a lower speed. Before going to Sx, set
5461 * 'Gig Disable' to force link speed negotiation to a lower speed based on
5462 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
5463 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5464 * needs to be written.
5465 * Parts that support (and are linked to a partner which support) EEE in
5466 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5467 * than 10Mbps w/o EEE.
5469 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5471 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5475 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
/* Force gigabit off during Sx so the link renegotiates down. */
5477 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5478 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5480 if (hw->phy.type == e1000_phy_i217) {
5481 u16 phy_reg, device_id = hw->device_id;
/* I218/SPT parts: stop requesting the PLL clock before Sx. */
5483 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5484 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5485 (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5486 (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5487 (hw->mac.type == e1000_pch_spt)) {
5488 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5490 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5491 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5494 ret_val = hw->phy.ops.acquire(hw);
5498 if (!dev_spec->eee_disable) {
5502 e1000_read_emi_reg_locked(hw,
5503 I217_EEE_ADVERTISEMENT,
5508 /* Disable LPLU if both link partners support 100BaseT
5509 * EEE and 100Full is advertised on both ends of the
5510 * link, and enable Auto Enable LPI since there will
5511 * be no driver to enable LPI while in Sx.
5513 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5514 (dev_spec->eee_lp_ability &
5515 I82579_EEE_100_SUPPORTED) &&
5516 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5517 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5518 E1000_PHY_CTRL_NOND0A_LPLU);
5520 /* Set Auto Enable LPI after link up */
5521 hw->phy.ops.read_reg_locked(hw,
5524 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5525 hw->phy.ops.write_reg_locked(hw,
5531 /* For i217 Intel Rapid Start Technology support,
5532 * when the system is going into Sx and no manageability engine
5533 * is present, the driver must configure proxy to reset only on
5534 * power good. LPI (Low Power Idle) state must also reset only
5535 * on power good, as well as the MTA (Multicast table array).
5536 * The SMBus release must also be disabled on LCD reset.
5538 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5539 E1000_ICH_FWSM_FW_VALID)) {
5540 /* Enable proxy to reset only on power good. */
5541 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5543 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5544 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5547 /* Set bit enable LPI (EEE) to reset only on
5550 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5551 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5552 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5554 /* Disable the SMB release on LCD reset. */
5555 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5556 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5557 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5560 /* Enable MTA to reset for Intel Rapid Start Technology
5563 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5564 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5565 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5568 hw->phy.ops.release(hw);
/* Commit the accumulated PHY_CTRL bits (GbE disable, LPLU). */
5571 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5573 if (hw->mac.type == e1000_ich8lan)
5574 e1000_gig_downshift_workaround_ich8lan(hw);
5576 if (hw->mac.type >= e1000_pchlan) {
5577 e1000_oem_bits_config_ich8lan(hw, FALSE);
5579 /* Reset PHY to activate OEM bits on 82577/8 */
5580 if (hw->mac.type == e1000_pchlan)
5581 e1000_phy_hw_reset_generic(hw);
5583 ret_val = hw->phy.ops.acquire(hw);
5586 e1000_write_smbus_addr(hw);
5587 hw->phy.ops.release(hw);
5594 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5595 * @hw: pointer to the HW structure
5597 * During Sx to S0 transitions on non-managed devices or managed devices
5598 * on which PHY resets are not blocked, if the PHY registers cannot be
5599 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
5601 * On i217, setup Intel Rapid Start Technology.
5603 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5607 DEBUGFUNC("e1000_resume_workarounds_pchlan");
/* Nothing to do for parts older than PCH2. */
5609 if (hw->mac.type < e1000_pch2lan)
5612 ret_val = e1000_init_phy_workarounds_pchlan(hw);
5614 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5618 /* For i217 Intel Rapid Start Technology support when the system
5619 * is transitioning from Sx and no manageability engine is present
5620 * configure SMBus to restore on reset, disable proxy, and enable
5621 * the reset on MTA (Multicast table array).
5623 if (hw->phy.type == e1000_phy_i217) {
5626 ret_val = hw->phy.ops.acquire(hw);
5628 DEBUGOUT("Failed to setup iRST\n");
5632 /* Clear Auto Enable LPI after link up */
5633 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5634 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5635 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5637 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5638 E1000_ICH_FWSM_FW_VALID)) {
5639 /* Restore clear on SMB if no manageability engine
5642 ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5646 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5647 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
/* Disable the Rapid Start proxy entirely on resume. */
5650 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5652 /* Enable reset on MTA */
5653 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5657 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5658 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5661 DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5662 hw->phy.ops.release(hw);
5667 * e1000_cleanup_led_ich8lan - Restore the default LED operation
5668 * @hw: pointer to the HW structure
5670 * Return the LED back to the default configuration.
5672 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5674 DEBUGFUNC("e1000_cleanup_led_ich8lan");
/* IFE PHYs drive the LED through a PHY register; others use LEDCTL. */
5676 if (hw->phy.type == e1000_phy_ife)
5677 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5680 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5681 return E1000_SUCCESS;
5685 * e1000_led_on_ich8lan - Turn LEDs on
5686 * @hw: pointer to the HW structure
5690 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5692 DEBUGFUNC("e1000_led_on_ich8lan");
/* IFE PHYs: force LEDs on via the PHY probe-mode register. */
5694 if (hw->phy.type == e1000_phy_ife)
5695 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5696 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
/* Other PHYs: mode2 is the pre-computed LEDCTL "on" pattern. */
5698 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5699 return E1000_SUCCESS;
5703 * e1000_led_off_ich8lan - Turn LEDs off
5704 * @hw: pointer to the HW structure
5706 * Turn off the LEDs.
5708 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5710 DEBUGFUNC("e1000_led_off_ich8lan");
/* IFE PHYs: force LEDs off via the PHY probe-mode register. */
5712 if (hw->phy.type == e1000_phy_ife)
5713 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5714 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
/* Other PHYs: mode1 is the pre-computed LEDCTL "off" pattern. */
5716 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5717 return E1000_SUCCESS;
5721 * e1000_setup_led_pchlan - Configures SW controllable LED
5722 * @hw: pointer to the HW structure
5724 * This prepares the SW controllable LED for use.
5726 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5728 DEBUGFUNC("e1000_setup_led_pchlan");
/* PCH parts configure LEDs through the HV_LED_CONFIG PHY register
 * instead of the MAC LEDCTL register.
 */
5730 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5731 (u16)hw->mac.ledctl_mode1);
5735 * e1000_cleanup_led_pchlan - Restore the default LED operation
5736 * @hw: pointer to the HW structure
5738 * Return the LED back to the default configuration.
5740 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5742 DEBUGFUNC("e1000_cleanup_led_pchlan");
/* Restore the NVM-derived default LED configuration via the PHY. */
5744 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5745 (u16)hw->mac.ledctl_default);
5749 * e1000_led_on_pchlan - Turn LEDs on
5750 * @hw: pointer to the HW structure
5754 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5756 u16 data = (u16)hw->mac.ledctl_mode2;
5759 DEBUGFUNC("e1000_led_on_pchlan");
5761 /* If no link, then turn LED on by setting the invert bit
5762 * for each LED that's mode is "link_up" in ledctl_mode2.
5764 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Each of the three LEDs occupies a 5-bit field in HV_LED_CONFIG. */
5765 for (i = 0; i < 3; i++) {
5766 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5767 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5768 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a "link up" LED lights while link is down. */
5770 if (led & E1000_PHY_LED0_IVRT)
5771 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5773 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5777 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5781 * e1000_led_off_pchlan - Turn LEDs off
5782 * @hw: pointer to the HW structure
5784 * Turn off the LEDs.
5786 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5788 u16 data = (u16)hw->mac.ledctl_mode1;
5791 DEBUGFUNC("e1000_led_off_pchlan");
5793 /* If no link, then turn LED off by clearing the invert bit
5794 * for each LED that's mode is "link_up" in ledctl_mode1.
5796 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Each of the three LEDs occupies a 5-bit field in HV_LED_CONFIG. */
5797 for (i = 0; i < 3; i++) {
5798 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5799 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5800 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a "link up" LED is dark while link is down. */
5802 if (led & E1000_PHY_LED0_IVRT)
5803 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5805 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5809 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5813 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5814 * @hw: pointer to the HW structure
5816 * Read appropriate register for the config done bit for completion status
5817 * and configure the PHY through s/w for EEPROM-less parts.
5819 * NOTE: some silicon which is EEPROM-less will fail trying to read the
5820 * config done bit, so only an error is logged and continues. If we were
5821 * to return with error, EEPROM-less silicon would not be able to be reset
5824 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5826 s32 ret_val = E1000_SUCCESS;
5830 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5832 e1000_get_cfg_done_generic(hw);
5834 /* Wait for indication from h/w that it has completed basic config */
5835 if (hw->mac.type >= e1000_ich10lan) {
5836 e1000_lan_init_done_ich8lan(hw);
5838 ret_val = e1000_get_auto_rd_done_generic(hw);
5840 /* When auto config read does not complete, do not
5841 * return with an error. This can happen in situations
5842 * where there is no eeprom and prevents getting link.
5844 DEBUGOUT("Auto Read Done did not complete\n");
5845 ret_val = E1000_SUCCESS;
5849 /* Clear PHY Reset Asserted bit */
5850 status = E1000_READ_REG(hw, E1000_STATUS);
5851 if (status & E1000_STATUS_PHYRA)
5852 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5854 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5856 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5857 if (hw->mac.type <= e1000_ich9lan) {
5858 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5859 (hw->phy.type == e1000_phy_igp_3)) {
5860 e1000_phy_init_script_igp3(hw);
/* Newer parts: no valid NVM bank implies no usable EEPROM. */
5863 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5864 /* Maybe we should do a basic PHY config */
5865 DEBUGOUT("EEPROM not present\n");
5866 ret_val = -E1000_ERR_CONFIG;
5874 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5875 * @hw: pointer to the HW structure
5877 * In the case of a PHY power down to save power, or to turn off link during a
5878 * driver unload, or wake on lan is not enabled, remove the link.
5880 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5882 /* If the management interface is not enabled, then power down */
/* Only power down when neither manageability nor a PHY reset block
 * requires the PHY to stay up.
 */
5883 if (!(hw->mac.ops.check_mng_mode(hw) ||
5884 hw->phy.ops.check_reset_block(hw)))
5885 e1000_power_down_phy_copper(hw);
5891 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5892 * @hw: pointer to the HW structure
5894 * Clears hardware counters specific to the silicon family and calls
5895 * clear_hw_cntrs_generic to clear all general purpose counters.
5897 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5902 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5904 e1000_clear_hw_cntrs_base_generic(hw);
/* The statistics registers below are clear-on-read; the values are
 * intentionally discarded.
 */
5906 E1000_READ_REG(hw, E1000_ALGNERRC);
5907 E1000_READ_REG(hw, E1000_RXERRC);
5908 E1000_READ_REG(hw, E1000_TNCRS);
5909 E1000_READ_REG(hw, E1000_CEXTERR);
5910 E1000_READ_REG(hw, E1000_TSCTC);
5911 E1000_READ_REG(hw, E1000_TSCTFC);
5913 E1000_READ_REG(hw, E1000_MGTPRC);
5914 E1000_READ_REG(hw, E1000_MGTPDC);
5915 E1000_READ_REG(hw, E1000_MGTPTC);
5917 E1000_READ_REG(hw, E1000_IAC);
5918 E1000_READ_REG(hw, E1000_ICRXOC);
5920 /* Clear PHY statistics registers */
5921 if ((hw->phy.type == e1000_phy_82578) ||
5922 (hw->phy.type == e1000_phy_82579) ||
5923 (hw->phy.type == e1000_phy_i217) ||
5924 (hw->phy.type == e1000_phy_82577)) {
5925 ret_val = hw->phy.ops.acquire(hw);
/* Select the HV statistics page, then read each upper/lower
 * counter pair to clear it; results are discarded.
 */
5928 ret_val = hw->phy.ops.set_page(hw,
5929 HV_STATS_PAGE << IGP_PAGE_SHIFT);
5932 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5933 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5934 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5935 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5936 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5937 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5938 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5939 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5940 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5941 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5942 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5943 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5944 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5945 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5947 hw->phy.ops.release(hw);