1 /******************************************************************************
3 Copyright (c) 2001-2009, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 * 82562G 10/100 Network Connection
37 * 82562G-2 10/100 Network Connection
38 * 82562GT 10/100 Network Connection
39 * 82562GT-2 10/100 Network Connection
40 * 82562V 10/100 Network Connection
41 * 82562V-2 10/100 Network Connection
42 * 82566DC-2 Gigabit Network Connection
43 * 82566DC Gigabit Network Connection
44 * 82566DM-2 Gigabit Network Connection
45 * 82566DM Gigabit Network Connection
46 * 82566MC Gigabit Network Connection
47 * 82566MM Gigabit Network Connection
48 * 82567LM Gigabit Network Connection
49 * 82567LF Gigabit Network Connection
50 * 82567V Gigabit Network Connection
51 * 82567LM-2 Gigabit Network Connection
52 * 82567LF-2 Gigabit Network Connection
53 * 82567V-2 Gigabit Network Connection
54 * 82567LF-3 Gigabit Network Connection
55 * 82567LM-3 Gigabit Network Connection
56 * 82567LM-4 Gigabit Network Connection
57 * 82577LM Gigabit Network Connection
58 * 82577LC Gigabit Network Connection
59 * 82578DM Gigabit Network Connection
60 * 82578DC Gigabit Network Connection
61 * 82579LM Gigabit Network Connection
62 * 82579V Gigabit Network Connection
65 #include "e1000_api.h"
67 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
81 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
82 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
83 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
84 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
86 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
88 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
89 u16 words, u16 *data);
90 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
91 u16 words, u16 *data);
92 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
93 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
94 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
96 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
97 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
98 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
99 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
100 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
101 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
102 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
103 u16 *speed, u16 *duplex);
104 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
105 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
106 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
107 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
108 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
109 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
110 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
111 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
112 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
113 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
114 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
115 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
116 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
117 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
118 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
119 u32 offset, u8 *data);
120 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
122 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
123 u32 offset, u16 *data);
124 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
125 u32 offset, u8 byte);
126 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
127 u32 offset, u8 data);
128 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
130 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
131 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
133 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
134 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
135 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
143 u16 flcdone :1; /* bit 0 Flash Cycle Done */
144 u16 flcerr :1; /* bit 1 Flash Cycle Error */
145 u16 dael :1; /* bit 2 Direct Access error Log */
146 u16 berasesz :2; /* bit 4:3 Sector Erase Size */
147 u16 flcinprog :1; /* bit 5 flash cycle in Progress */
148 u16 reserved1 :2; /* bit 7:6 Reserved */
149 u16 reserved2 :6; /* bit 13:8 Reserved */
150 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
151 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159 struct ich8_hsflctl {
160 u16 flcgo :1; /* 0 Flash Cycle Go */
161 u16 flcycle :2; /* 2:1 Flash Cycle */
162 u16 reserved :5; /* 7:3 Reserved */
163 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
/* NOTE(review): field is named flockdn but the comment marks bits 15:10 as
 * reserved; confirm against the chipset HSFCTL register definition. */
164 u16 flockdn :6; /* 15:10 Reserved */
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
/* Each 8-bit field below covers one permission class for the GbE region */
172 u32 grra :8; /* 0:7 GbE region Read Access */
173 u32 grwa :8; /* 8:15 GbE region Write Access */
174 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
175 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
/*
 * e1000_toggle_lanphypc_value_ich8lan - toggle the LANPHYPC pin value
 * @hw: pointer to the HW structure
 *
 * Pulses the LANPHYPC value via the CTRL register: assert the override with
 * the value bit cleared, then release the override.  Used by the PCH PHY
 * init path to force the MAC-PHY interconnect out of SMBus mode (see the
 * caller's comment in e1000_init_phy_params_pchlan).
 */
180 static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
184 DEBUGFUNC("e1000_toggle_lanphypc_value_ich8lan");
/* Take SW control of the pin and drive the value low */
186 ctrl = E1000_READ_REG(hw, E1000_CTRL);
187 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
188 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
189 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
/* Release the override so the pin returns to hardware control */
191 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
192 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
196 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
197 * @hw: pointer to the HW structure
199 * Initialize family-specific PHY parameters and function pointers.
201 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
203 struct e1000_phy_info *phy = &hw->phy;
205 s32 ret_val = E1000_SUCCESS;
207 DEBUGFUNC("e1000_init_phy_params_pchlan");
210 phy->reset_delay_us = 100;
/* PCH parts use the HV PHY register access routines and the shared
 * SW-flag semaphore for PHY/NVM arbitration. */
212 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
213 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
214 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
215 phy->ops.set_page = e1000_set_page_igp;
216 phy->ops.read_reg = e1000_read_phy_reg_hv;
217 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
218 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
219 phy->ops.release = e1000_release_swflag_ich8lan;
220 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
221 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
222 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
223 phy->ops.write_reg = e1000_write_phy_reg_hv;
224 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
225 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
226 phy->ops.power_up = e1000_power_up_phy_copper;
227 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
228 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
231 * The MAC-PHY interconnect may still be in SMBus mode
232 * after Sx->S0. If the manageability engine (ME) is
233 * disabled, then toggle the LANPHYPC Value bit to force
234 * the interconnect to PCIe mode.
236 fwsm = E1000_READ_REG(hw, E1000_FWSM);
237 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) &&
238 !hw->phy.ops.check_reset_block(hw)) {
239 e1000_toggle_lanphypc_value_ich8lan(hw);
243 * Gate automatic PHY configuration by hardware on
246 if (hw->mac.type == e1000_pch2lan)
247 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
251 * Reset the PHY before any access to it. Doing so, ensures that
252 * the PHY is in a known good state before we read/write PHY registers.
253 * The generic reset is sufficient here, because we haven't determined
256 ret_val = e1000_phy_hw_reset_generic(hw);
260 /* Ungate automatic PHY configuration on non-managed 82579 */
261 if ((hw->mac.type == e1000_pch2lan) &&
262 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
264 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
/* Identify the PHY; retry in MDIO slow mode if the first read
 * returns an invalid id (0 or all revision bits set). */
267 phy->id = e1000_phy_unknown;
268 switch (hw->mac.type) {
270 ret_val = e1000_get_phy_id(hw);
273 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
278 * In case the PHY needs to be in mdio slow mode,
279 * set slow mode and try to get the PHY id again.
281 ret_val = e1000_set_mdio_slow_mode_hv(hw);
284 ret_val = e1000_get_phy_id(hw);
289 phy->type = e1000_get_phy_type_from_id(phy->id);
/* Bind per-PHY-type helpers based on the detected PHY */
292 case e1000_phy_82577:
293 case e1000_phy_82579:
294 phy->ops.check_polarity = e1000_check_polarity_82577;
295 phy->ops.force_speed_duplex =
296 e1000_phy_force_speed_duplex_82577;
297 phy->ops.get_cable_length = e1000_get_cable_length_82577;
298 phy->ops.get_info = e1000_get_phy_info_82577;
299 phy->ops.commit = e1000_phy_sw_reset_generic;
301 case e1000_phy_82578:
302 phy->ops.check_polarity = e1000_check_polarity_m88;
303 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
304 phy->ops.get_cable_length = e1000_get_cable_length_m88;
305 phy->ops.get_info = e1000_get_phy_info_m88;
/* Unrecognized PHY type: report an error to the caller */
308 ret_val = -E1000_ERR_PHY;
317 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
318 * @hw: pointer to the HW structure
320 * Initialize family-specific PHY parameters and function pointers.
322 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
324 struct e1000_phy_info *phy = &hw->phy;
325 s32 ret_val = E1000_SUCCESS;
328 DEBUGFUNC("e1000_init_phy_params_ich8lan");
331 phy->reset_delay_us = 100;
/* Default to IGP register access; may be switched to BM below if
 * the PHY address cannot be determined with the IGP routines. */
333 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
334 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
335 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
336 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
337 phy->ops.read_reg = e1000_read_phy_reg_igp;
338 phy->ops.release = e1000_release_swflag_ich8lan;
339 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
340 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
341 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
342 phy->ops.write_reg = e1000_write_phy_reg_igp;
343 phy->ops.power_up = e1000_power_up_phy_copper;
344 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
347 * We may need to do this twice - once for IGP and if that fails,
348 * we'll set BM func pointers and try again
350 ret_val = e1000_determine_phy_address(hw);
352 phy->ops.write_reg = e1000_write_phy_reg_bm;
353 phy->ops.read_reg = e1000_read_phy_reg_bm;
354 ret_val = e1000_determine_phy_address(hw);
356 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
/* Retry reading the PHY id while it still maps to an unknown type */
362 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
365 ret_val = e1000_get_phy_id(hw);
/* Bind per-PHY-type helpers based on the detected PHY id */
372 case IGP03E1000_E_PHY_ID:
373 phy->type = e1000_phy_igp_3;
374 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
375 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
376 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
377 phy->ops.get_info = e1000_get_phy_info_igp;
378 phy->ops.check_polarity = e1000_check_polarity_igp;
379 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
/* IFE PHYs are 10/100 only, hence the not-gig autoneg mask */
382 case IFE_PLUS_E_PHY_ID:
384 phy->type = e1000_phy_ife;
385 phy->autoneg_mask = E1000_ALL_NOT_GIG;
386 phy->ops.get_info = e1000_get_phy_info_ife;
387 phy->ops.check_polarity = e1000_check_polarity_ife;
388 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
390 case BME1000_E_PHY_ID:
391 phy->type = e1000_phy_bm;
392 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
393 phy->ops.read_reg = e1000_read_phy_reg_bm;
394 phy->ops.write_reg = e1000_write_phy_reg_bm;
395 phy->ops.commit = e1000_phy_sw_reset_generic;
396 phy->ops.get_info = e1000_get_phy_info_m88;
397 phy->ops.check_polarity = e1000_check_polarity_m88;
398 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
/* Unrecognized PHY id: report an error to the caller */
401 ret_val = -E1000_ERR_PHY;
410 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
411 * @hw: pointer to the HW structure
413 * Initialize family-specific NVM parameters and function
416 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
418 struct e1000_nvm_info *nvm = &hw->nvm;
419 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
420 u32 gfpreg, sector_base_addr, sector_end_addr;
421 s32 ret_val = E1000_SUCCESS;
424 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
426 /* Can't read flash registers if the register set isn't mapped. */
427 if (!hw->flash_address) {
428 DEBUGOUT("ERROR: Flash registers not mapped\n");
429 ret_val = -E1000_ERR_CONFIG;
433 nvm->type = e1000_nvm_flash_sw;
/* GFPREG holds the GbE region's base (low 16 bits) and limit
 * (high 16 bits) as sector numbers within the flash part. */
435 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
438 * sector_X_addr is a "sector"-aligned address (4096 bytes)
439 * Add 1 to sector_end_addr since this sector is included in
442 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
443 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
445 /* flash_base_addr is byte-aligned */
446 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
449 * find total size of the NVM, then cut in half since the total
450 * size represents two separate NVM banks.
452 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
453 << FLASH_SECTOR_ADDR_SHIFT;
454 nvm->flash_bank_size /= 2;
455 /* Adjust to word count */
456 nvm->flash_bank_size /= sizeof(u16);
458 nvm->word_size = E1000_SHADOW_RAM_WORDS;
460 /* Clear shadow ram */
461 for (i = 0; i < nvm->word_size; i++) {
462 dev_spec->shadow_ram[i].modified = FALSE;
463 dev_spec->shadow_ram[i].value = 0xFFFF;
466 /* Function Pointers */
467 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
468 nvm->ops.release = e1000_release_nvm_ich8lan;
469 nvm->ops.read = e1000_read_nvm_ich8lan;
470 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
471 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
472 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
473 nvm->ops.write = e1000_write_nvm_ich8lan;
480 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
481 * @hw: pointer to the HW structure
483 * Initialize family-specific MAC parameters and function
486 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
488 struct e1000_mac_info *mac = &hw->mac;
490 DEBUGFUNC("e1000_init_mac_params_ich8lan");
492 /* Set media type function pointer */
493 hw->phy.media_type = e1000_media_type_copper;
495 /* Set mta register count */
496 mac->mta_reg_count = 32;
497 /* Set rar entry count */
498 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
/* ICH8 has one fewer receive-address register than the others */
499 if (mac->type == e1000_ich8lan)
500 mac->rar_entry_count--;
501 /* Set if part includes ASF firmware */
502 mac->asf_firmware_present = TRUE;
504 mac->has_fwsm = TRUE;
505 /* ARC subsystem not supported */
506 mac->arc_subsystem_valid = FALSE;
507 /* Adaptive IFS supported */
508 mac->adaptive_ifs = TRUE;
510 /* Function pointers */
512 /* bus type/speed/width */
513 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
515 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
517 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
518 /* hw initialization */
519 mac->ops.init_hw = e1000_init_hw_ich8lan;
521 mac->ops.setup_link = e1000_setup_link_ich8lan;
522 /* physical interface setup */
523 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
525 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
527 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
528 /* multicast address update */
529 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
530 /* clear hardware counters */
531 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
/* ICH-family defaults below; the PCH assignments further down
 * override these for pchlan/pch2lan parts. */
538 /* check management mode */
539 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
541 mac->ops.id_led_init = e1000_id_led_init_generic;
543 mac->ops.blink_led = e1000_blink_led_generic;
545 mac->ops.setup_led = e1000_setup_led_generic;
547 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
548 /* turn on/off LED */
549 mac->ops.led_on = e1000_led_on_ich8lan;
550 mac->ops.led_off = e1000_led_off_ich8lan;
/* pch2lan: fewer RARs (remainder reserved for ME) and PHY-side
 * multicast table mirroring via e1000_update_mc_addr_list_pch2lan */
553 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
554 mac->ops.rar_set = e1000_rar_set_pch2lan;
555 /* multicast address update for pch2 */
556 mac->ops.update_mc_addr_list =
557 e1000_update_mc_addr_list_pch2lan;
560 /* check management mode */
561 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
563 mac->ops.id_led_init = e1000_id_led_init_pchlan;
565 mac->ops.setup_led = e1000_setup_led_pchlan;
567 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
568 /* turn on/off LED */
569 mac->ops.led_on = e1000_led_on_pchlan;
570 mac->ops.led_off = e1000_led_off_pchlan;
576 /* Enable PCS Lock-loss workaround for ICH8 */
577 if (mac->type == e1000_ich8lan)
578 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
580 /* Gate automatic PHY configuration by hardware on managed 82579 */
581 if ((mac->type == e1000_pch2lan) &&
582 (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
583 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
585 return E1000_SUCCESS;
589 * e1000_set_eee_pchlan - Enable/disable EEE support
590 * @hw: pointer to the HW structure
592 * Enable/disable EEE based on setting in dev_spec structure. The bits in
593 * the LPI Control register will remain set only if/when link is up.
595 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
597 s32 ret_val = E1000_SUCCESS;
600 DEBUGFUNC("e1000_set_eee_pchlan");
/* EEE is only handled for the 82579 PHY; other types are a no-op */
602 if (hw->phy.type != e1000_phy_82579)
605 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
/* Apply the driver's eee_disable policy to the LPI Control enables */
609 if (hw->dev_spec.ich8lan.eee_disable)
610 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
612 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
614 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
620 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
621 * @hw: pointer to the HW structure
623 * Checks to see if the link status of the hardware has changed. If a
624 * change in link status has been detected, then we read the PHY registers
625 * to get the current speed/duplex if link exists.
627 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
629 struct e1000_mac_info *mac = &hw->mac;
633 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
636 * We only want to go out to the PHY registers to see if Auto-Neg
637 * has completed and/or if our link status has changed. The
638 * get_link_status flag is set upon receiving a Link Status
639 * Change or Rx Sequence Error interrupt.
641 if (!mac->get_link_status) {
642 ret_val = E1000_SUCCESS;
647 * First we want to see if the MII Status Register reports
648 * link. If so, then we want to get the current speed/duplex
651 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
/* The K1 1-Gig workaround runs regardless of link state on pchlan */
655 if (hw->mac.type == e1000_pchlan) {
656 ret_val = e1000_k1_gig_workaround_hv(hw, link);
662 goto out; /* No link detected */
664 mac->get_link_status = FALSE;
/* Per-MAC-type Si workarounds applied once link is detected */
666 switch (hw->mac.type) {
668 ret_val = e1000_k1_workaround_lv(hw);
673 if (hw->phy.type == e1000_phy_82578) {
674 ret_val = e1000_link_stall_workaround_hv(hw);
680 * Workaround for PCHx parts in half-duplex:
681 * Set the number of preambles removed from the packet
682 * when it is passed from the PHY to the MAC to prevent
683 * the MAC from misinterpreting the packet type.
685 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
689 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA,
691 preambles &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
692 preambles |= (4 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
693 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
702 * Check if there was DownShift, must be checked
703 * immediately after link-up
705 e1000_check_downshift_generic(hw);
707 /* Enable/Disable EEE after link up */
708 ret_val = e1000_set_eee_pchlan(hw);
713 * If we are forcing speed/duplex, then we simply return since
714 * we have already determined whether we have link or not.
717 ret_val = -E1000_ERR_CONFIG;
722 * Auto-Neg is enabled. Auto Speed Detection takes care
723 * of MAC speed/duplex configuration. So we only need to
724 * configure Collision Distance in the MAC.
726 e1000_config_collision_dist_generic(hw);
729 * Configure Flow Control now that Auto-Neg has completed.
730 * First, we need to restore the desired flow control
731 * settings because we may have had to re-autoneg with a
732 * different link partner.
734 ret_val = e1000_config_fc_after_link_up_generic(hw);
736 DEBUGOUT("Error configuring flow control\n");
743 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
744 * @hw: pointer to the HW structure
746 * Initialize family-specific function pointers for PHY, MAC, and NVM.
748 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
750 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
752 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
753 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
/* PHY init differs: ICH parts vs. PCH parts get separate routines */
754 switch (hw->mac.type) {
758 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
762 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
770 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
771 * @hw: pointer to the HW structure
773 * Acquires the mutex for performing NVM operations.
775 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
777 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
/* Always succeeds in the visible code path */
779 return E1000_SUCCESS;
783 * e1000_release_nvm_ich8lan - Release NVM mutex
784 * @hw: pointer to the HW structure
786 * Releases the mutex used while performing NVM operations.
/* Counterpart to e1000_acquire_nvm_ich8lan */
788 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
790 DEBUGFUNC("e1000_release_nvm_ich8lan");
796 * e1000_acquire_swflag_ich8lan - Acquire software control flag
797 * @hw: pointer to the HW structure
799 * Acquires the software control flag for performing PHY and select
802 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
804 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
805 s32 ret_val = E1000_SUCCESS;
807 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
/* First wait (up to PHY_CFG_TIMEOUT) for any current SW/FW/HW owner
 * of the flag to release it */
810 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
811 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
819 DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
820 ret_val = -E1000_ERR_CONFIG;
/* Then set the flag ourselves and wait (up to SW_FLAG_TIMEOUT) for
 * the hardware to reflect it back */
824 timeout = SW_FLAG_TIMEOUT;
826 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
827 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
830 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
831 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
/* Acquisition failed: clear the flag we set and report the error */
839 DEBUGOUT("Failed to acquire the semaphore.\n");
840 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
841 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
842 ret_val = -E1000_ERR_CONFIG;
852 * e1000_release_swflag_ich8lan - Release software control flag
853 * @hw: pointer to the HW structure
855 * Releases the software control flag for performing PHY and select
858 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
862 DEBUGFUNC("e1000_release_swflag_ich8lan");
864 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
/* Only clear the flag if we still hold it; otherwise warn */
866 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
867 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
868 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
870 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
877 * e1000_check_mng_mode_ich8lan - Checks management mode
878 * @hw: pointer to the HW structure
880 * This checks if the adapter has any manageability enabled.
881 * This is a function pointer entry point only called by read/write
882 * routines for the PHY and NVM parts.
884 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
888 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
890 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* True only when FW is valid AND the FWSM mode field equals the
 * iAMT mode value exactly */
892 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
893 ((fwsm & E1000_FWSM_MODE_MASK) ==
894 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
898 * e1000_check_mng_mode_pchlan - Checks management mode
899 * @hw: pointer to the HW structure
901 * This checks if the adapter has iAMT enabled.
902 * This is a function pointer entry point only called by read/write
903 * routines for the PHY and NVM parts.
905 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
909 DEBUGFUNC("e1000_check_mng_mode_pchlan");
911 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* Unlike the ich8lan variant above, this tests the iAMT mode bits
 * with a bitwise AND rather than an exact mode-field compare */
913 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
914 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
918 * e1000_rar_set_pch2lan - Set receive address register
919 * @hw: pointer to the HW structure
920 * @addr: pointer to the receive address
921 * @index: receive address array register
923 * Sets the receive address array register at index to the address passed
924 * in by addr. For 82579, RAR[0] is the base address register that is to
925 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
926 * Use SHRA[0-3] in place of those reserved for ME.
928 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
930 u32 rar_low, rar_high;
932 DEBUGFUNC("e1000_rar_set_pch2lan");
935 * HW expects these in little endian so we reverse the byte order
936 * from network order (big endian) to little endian
938 rar_low = ((u32) addr[0] |
939 ((u32) addr[1] << 8) |
940 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
942 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
944 /* If MAC address zero, no need to set the AV bit */
945 if (rar_low || rar_high)
946 rar_high |= E1000_RAH_AV;
/* index 0 goes to RAL/RAH directly; flush after each half-write */
949 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
950 E1000_WRITE_FLUSH(hw);
951 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
952 E1000_WRITE_FLUSH(hw);
/* Non-zero indices map to SHRA[index - 1] (see note above about the
 * RAR entries reserved for ME) */
956 if (index < hw->mac.rar_entry_count) {
957 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
958 E1000_WRITE_FLUSH(hw);
959 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
960 E1000_WRITE_FLUSH(hw);
962 /* verify the register updates */
963 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
964 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
967 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
968 (index - 1), E1000_READ_REG(hw, E1000_FWSM))
971 DEBUGOUT1("Failed to write receive address at index %d\n", index);
975 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
976 * @hw: pointer to the HW structure
977 * @mc_addr_list: array of multicast addresses to program
978 * @mc_addr_count: number of multicast addresses to program
980 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
981 * The caller must have a packed mc_addr_list of multicast addresses.
983 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
991 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
/* Program the MAC-side MTA first, then mirror it into the PHY's
 * BM_MTA wakeup registers */
993 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
995 ret_val = hw->phy.ops.acquire(hw);
999 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
/* Each 32-bit mta_shadow entry is split across two 16-bit PHY
 * registers: BM_MTA(i) = low half, BM_MTA(i)+1 = high half */
1003 for (i = 0; i < hw->mac.mta_reg_count; i++) {
1004 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1005 (u16)(hw->mac.mta_shadow[i] &
1007 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1008 (u16)((hw->mac.mta_shadow[i] >> 16) &
1012 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1015 hw->phy.ops.release(hw);
1019 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1020 * @hw: pointer to the HW structure
1022 * Checks if firmware is blocking the reset of the PHY.
1023 * This is a function pointer entry point only called by
1026 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1030 DEBUGFUNC("e1000_check_reset_block_ich8lan");
/* Driver-requested block takes precedence over the firmware state */
1032 if (hw->phy.reset_disable)
1033 return E1000_BLK_PHY_RESET;
1035 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* RSPCIPHY set in FWSM means firmware permits the PHY reset */
1037 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1038 : E1000_BLK_PHY_RESET;
1042 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1043 * @hw: pointer to the HW structure
1045 * Assumes semaphore already acquired.
1048 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
/* The SMBus address is strapped in the STRAP register */
1051 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1052 s32 ret_val = E1000_SUCCESS;
1054 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1056 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
/* Replace the PHY's SMBus address field with the strapped value and
 * mark it valid with PEC (packet error checking) enabled */
1060 phy_data &= ~HV_SMB_ADDR_MASK;
1061 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1062 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1063 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1070 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1071 * @hw: pointer to the HW structure
1073 * SW should configure the LCD from the NVM extended configuration region
1074 * as a workaround for certain parts.
1076 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1078 struct e1000_phy_info *phy = &hw->phy;
1079 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1080 s32 ret_val = E1000_SUCCESS;
1081 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1083 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1086 * Initialize the PHY from the NVM on ICH platforms. This
1087 * is needed due to an issue where the NVM configuration is
1088 * not properly autoloaded after power transitions.
1089 * Therefore, after each PHY reset, we will load the
1090 * configuration data out of the NVM manually.
1092 switch (hw->mac.type) {
1094 if (phy->type != e1000_phy_igp_3)
/* AMT/C ICH8 variants use a different SW_CONFIG bit than ICH8M */
1097 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1098 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1099 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1105 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1111 ret_val = hw->phy.ops.acquire(hw);
/* Nothing to do unless the NVM requests SW configuration */
1115 data = E1000_READ_REG(hw, E1000_FEXTNVM);
1116 if (!(data & sw_cfg_mask))
1120 * Make sure HW does not configure LCD from PHY
1121 * extended configuration before SW configuration
1123 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1124 if (!(hw->mac.type == e1000_pch2lan)) {
1125 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
/* Extended config region size (in words) and base pointer come
 * from EXTCNF_SIZE and EXTCNF_CTRL respectively */
1129 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1130 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1131 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1135 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1136 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1138 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1139 (hw->mac.type == e1000_pchlan)) ||
1140 (hw->mac.type == e1000_pch2lan)) {
1142 * HW configures the SMBus address and LEDs when the
1143 * OEM and LCD Write Enable bits are set in the NVM.
1144 * When both NVM bits are cleared, SW will configure
1147 ret_val = e1000_write_smbus_addr(hw);
/* Mirror the MAC's LEDCTL value into the PHY LED config */
1151 data = E1000_READ_REG(hw, E1000_LEDCTL);
1152 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1158 /* Configure LCD from extended configuration region. */
1160 /* cnf_base_addr is in DWORD */
1161 word_addr = (u16)(cnf_base_addr << 1);
/* Each entry is a (reg_data, reg_addr) word pair in the NVM */
1163 for (i = 0; i < cnf_size; i++) {
1164 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1169 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1174 /* Save off the PHY page for future writes. */
1175 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1176 phy_page = reg_data;
/* Combine the saved page with the register offset before writing */
1180 reg_addr &= PHY_REG_MASK;
1181 reg_addr |= phy_page;
1183 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1190 hw->phy.ops.release(hw);
1195 * e1000_k1_gig_workaround_hv - K1 Si workaround
1196 * @hw: pointer to the HW structure
1197 * @link: link up bool flag
1199 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1200 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1201 * If link is down, the function will restore the default K1 setting located
 * in the NVM (dev_spec.ich8lan.nvm_k1_enabled).  Applies to pchlan only.
1204 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1206 s32 ret_val = E1000_SUCCESS;
1208 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1210 DEBUGFUNC("e1000_k1_gig_workaround_hv");
1212 if (hw->mac.type != e1000_pchlan)
1215 /* Wrap the whole flow with the sw flag */
1216 ret_val = hw->phy.ops.acquire(hw);
1220 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
/* 82578 reports link/speed via BM_CS_STATUS ... */
1222 if (hw->phy.type == e1000_phy_82578) {
1223 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
/* Only the link/resolved/speed bits matter for the 1G comparison. */
1228 status_reg &= BM_CS_STATUS_LINK_UP |
1229 BM_CS_STATUS_RESOLVED |
1230 BM_CS_STATUS_SPEED_MASK;
1232 if (status_reg == (BM_CS_STATUS_LINK_UP |
1233 BM_CS_STATUS_RESOLVED |
1234 BM_CS_STATUS_SPEED_1000))
/* ... while 82577 uses HV_M_STATUS for the same check. */
1238 if (hw->phy.type == e1000_phy_82577) {
1239 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1244 status_reg &= HV_M_STATUS_LINK_UP |
1245 HV_M_STATUS_AUTONEG_COMPLETE |
1246 HV_M_STATUS_SPEED_MASK;
1248 if (status_reg == (HV_M_STATUS_LINK_UP |
1249 HV_M_STATUS_AUTONEG_COMPLETE |
1250 HV_M_STATUS_SPEED_1000))
1254 /* Link stall fix for link up */
1255 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1261 /* Link stall fix for link down */
1262 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
/* Apply the resulting K1 enable/disable decision. */
1268 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1271 hw->phy.ops.release(hw);
1277 * e1000_configure_k1_ich8lan - Configure K1 power state
1278 * @hw: pointer to the HW structure
1279 * @enable: K1 state to configure
1281 * Configure the K1 power state based on the provided parameter.
1282 * Assumes semaphore already acquired.
1284 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1286 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1288 s32 ret_val = E1000_SUCCESS;
1294 DEBUGFUNC("e1000_configure_k1_ich8lan");
/* Read-modify-write the K1 enable bit in the Kumeran config register. */
1296 ret_val = e1000_read_kmrn_reg_locked(hw,
1297 E1000_KMRNCTRLSTA_K1_CONFIG,
1303 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1305 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1307 ret_val = e1000_write_kmrn_reg_locked(hw,
1308 E1000_KMRNCTRLSTA_K1_CONFIG,
/*
 * Briefly force speed with SPD_BYPS so the K1 change takes effect,
 * then restore the original CTRL/CTRL_EXT values.
 */
1314 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1315 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1317 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1318 reg |= E1000_CTRL_FRCSPD;
1319 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1321 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1323 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1324 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1332 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1333 * @hw: pointer to the HW structure
1334 * @d0_state: boolean if entering d0 or d3 device state
1336 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1337 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1338 * in NVM determines whether HW should configure LPLU and Gbe Disable.
 *
 * PCH/PCH2 only.  Mirrors E1000_PHY_CTRL settings into HV_OEM_BITS and
 * restarts auto-negotiation (when not reset-blocked) to activate them.
1340 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1346 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1348 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1351 ret_val = hw->phy.ops.acquire(hw);
/* On pchlan, defer to HW when OEM Write Enable is set in the NVM. */
1355 if (!(hw->mac.type == e1000_pch2lan)) {
1356 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1357 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1361 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1362 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1365 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1367 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1371 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/* D0 uses the D0A bits of PHY_CTRL; D3 uses the NOND0A bits. */
1374 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1375 oem_reg |= HV_OEM_BITS_GBE_DIS;
1377 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1378 oem_reg |= HV_OEM_BITS_LPLU;
1380 if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1381 oem_reg |= HV_OEM_BITS_GBE_DIS;
1383 if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1384 oem_reg |= HV_OEM_BITS_LPLU;
1386 /* Restart auto-neg to activate the bits */
1387 if (!hw->phy.ops.check_reset_block(hw))
1388 oem_reg |= HV_OEM_BITS_RESTART_AN;
1389 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1392 hw->phy.ops.release(hw);
1399 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1400 * @hw: pointer to the HW structure
 *
 * Sets the HV_KMRN_MDIO_SLOW bit in HV_KMRN_MODE_CTRL via a
 * read-modify-write; returns the status of the PHY register accesses.
1402 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1407 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1409 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1413 data |= HV_KMRN_MDIO_SLOW;
1415 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1421 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1422 * done after every PHY reset.
 * @hw: pointer to the HW structure
 *
 * pchlan only: enables slow MDIO mode (82577), disables early preamble on
 * early PHY revisions, tunes the KMRN FIFO for SSC, soft-resets early
 * 82578 revisions, resets the MDIO page, applies the K1 gig workaround,
 * and sets a port-general-config bit for busy half-duplex hubs.
1424 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1426 s32 ret_val = E1000_SUCCESS;
1429 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1431 if (hw->mac.type != e1000_pchlan)
1434 /* Set MDIO slow mode before any other MDIO access */
1435 if (hw->phy.type == e1000_phy_82577) {
1436 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1441 if (((hw->phy.type == e1000_phy_82577) &&
1442 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1443 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1444 /* Disable generation of early preamble */
1445 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1449 /* Preamble tuning for SSC */
1450 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1455 if (hw->phy.type == e1000_phy_82578) {
1457 * Return registers to default by doing a soft reset then
1458 * writing 0x3140 to the control register.
1460 if (hw->phy.revision < 2) {
1461 e1000_phy_sw_reset_generic(hw);
1462 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
/* Select page 0 directly via MDIC (needs the PHY semaphore). */
1468 ret_val = hw->phy.ops.acquire(hw);
1473 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1474 hw->phy.ops.release(hw);
1479 * Configure the K1 Si workaround during phy reset assuming there is
1480 * link so that it disables K1 if link is in 1Gbps.
1482 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1486 /* Workaround for link disconnects on a busy hub in half duplex */
1487 ret_val = hw->phy.ops.acquire(hw);
1490 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG_REG,
1494 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG_REG,
1497 hw->phy.ops.release(hw);
1503 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1504 * @hw: pointer to the HW structure
 *
 * Copies all MAC receive address registers (RAL/RAH, plus the shared
 * SHRAL/SHRAH entries) into the PHY's BM_RAR registers, using the BM
 * wakeup-register access window under the PHY semaphore.
1506 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1512 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1514 ret_val = hw->phy.ops.acquire(hw);
/* Open access to the BM wakeup register space. */
1517 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1521 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1522 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1523 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1524 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1525 (u16)(mac_reg & 0xFFFF));
1526 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1527 (u16)((mac_reg >> 16) & 0xFFFF));
1529 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1530 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1531 (u16)(mac_reg & 0xFFFF));
/* BM_RAR_CTRL carries the address-valid flag from RAH.AV. */
1532 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1533 (u16)((mac_reg & E1000_RAH_AV)
1537 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1540 hw->phy.ops.release(hw);
1543 static u32 e1000_calc_rx_da_crc(u8 mac[])
1545 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1546 u32 i, j, mask, crc;
1548 DEBUGFUNC("e1000_calc_rx_da_crc");
1551 for (i = 0; i < 6; i++) {
1553 for (j = 8; j > 0; j--) {
1554 mask = (crc & 1) * (-1);
1555 crc = (crc >> 1) ^ (poly & mask);
1562 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1564 * @hw: pointer to the HW structure
1565 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
 *
 * 82579 (pch2lan) only.  Disables the PHY Rx path, then either programs
 * the MAC/PHY jumbo-frame workaround values (Rx address CRC seeds, FFLT
 * debug bits, SECRC stripping, Kumeran FIFO/HD-control values, PHY gain
 * and timer registers) or restores the hardware defaults, and finally
 * re-enables the Rx path.
1567 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1569 s32 ret_val = E1000_SUCCESS;
1574 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1576 if ((hw->mac.type != e1000_pch2lan) &&
1577 (hw->phy.type != e1000_phy_82579))
1580 /* disable Rx path while enabling/disabling workaround */
1581 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1582 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1588 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1589 * SHRAL/H) and initial CRC values to the MAC
1591 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1592 u8 mac_addr[ETH_ADDR_LEN] = {0};
1593 u32 addr_high, addr_low;
1595 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
/* Skip receive-address entries that are not marked valid. */
1596 if (!(addr_high & E1000_RAH_AV))
1598 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1599 mac_addr[0] = (addr_low & 0xFF);
1600 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1601 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1602 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1603 mac_addr[4] = (addr_high & 0xFF);
1604 mac_addr[5] = ((addr_high >> 8) & 0xFF);
/* Seed the per-address initial CRC register. */
1606 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1607 e1000_calc_rx_da_crc(mac_addr));
1610 /* Write Rx addresses to the PHY */
1611 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1613 /* Enable jumbo frame workaround in the MAC */
1614 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1615 mac_reg &= ~(1 << 14);
1616 mac_reg |= (7 << 15);
1617 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip the Ethernet CRC in the MAC while the workaround is active. */
1619 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1620 mac_reg |= E1000_RCTL_SECRC;
1621 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1623 ret_val = e1000_read_kmrn_reg_generic(hw,
1624 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1628 ret_val = e1000_write_kmrn_reg_generic(hw,
1629 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1633 ret_val = e1000_read_kmrn_reg_generic(hw,
1634 E1000_KMRNCTRLSTA_HD_CTRL,
1638 data &= ~(0xF << 8);
1640 ret_val = e1000_write_kmrn_reg_generic(hw,
1641 E1000_KMRNCTRLSTA_HD_CTRL,
1646 /* Enable jumbo frame workaround in the PHY */
1647 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1648 data &= ~(0x7F << 5);
1649 data |= (0x37 << 5);
1650 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1653 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1655 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1658 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1659 data &= ~(0x3FF << 2);
1660 data |= (0x1A << 2);
1661 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1664 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
1667 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1668 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
1672 /* Write MAC register values back to h/w defaults */
1673 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1674 mac_reg &= ~(0xF << 14);
1675 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1677 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1678 mac_reg &= ~E1000_RCTL_SECRC;
1679 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1681 ret_val = e1000_read_kmrn_reg_generic(hw,
1682 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1686 ret_val = e1000_write_kmrn_reg_generic(hw,
1687 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1691 ret_val = e1000_read_kmrn_reg_generic(hw,
1692 E1000_KMRNCTRLSTA_HD_CTRL,
1696 data &= ~(0xF << 8);
1698 ret_val = e1000_write_kmrn_reg_generic(hw,
1699 E1000_KMRNCTRLSTA_HD_CTRL,
1704 /* Write PHY register values back to h/w defaults */
1705 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1706 data &= ~(0x7F << 5);
1707 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1710 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1712 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1715 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1716 data &= ~(0x3FF << 2);
1718 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1721 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1724 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1725 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
1730 /* re-enable Rx path after enabling/disabling workaround */
1731 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1738 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1739 * done after every PHY reset.
 * @hw: pointer to the HW structure
 *
 * pch2lan (82579) only: currently just forces slow MDIO mode before any
 * other MDIO access takes place.
1741 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1743 s32 ret_val = E1000_SUCCESS;
1745 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1747 if (hw->mac.type != e1000_pch2lan)
1750 /* Set MDIO slow mode before any other MDIO access */
1751 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1758 * e1000_k1_gig_workaround_lv - K1 Si workaround
1759 * @hw: pointer to the HW structure
1761 * Workaround to set the K1 beacon duration for 82579 parts
 *
 * When link is up and auto-negotiation is complete, programs the FEXTNVM4
 * beacon duration to 8us at 1Gbps or 16us at lower speeds.
1763 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1765 s32 ret_val = E1000_SUCCESS;
1769 DEBUGFUNC("e1000_k1_workaround_lv");
1771 if (hw->mac.type != e1000_pch2lan)
1774 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1775 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
1779 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1780 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1781 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1782 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1784 if (status_reg & HV_M_STATUS_SPEED_1000)
1785 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1787 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1789 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1797 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1798 * @hw: pointer to the HW structure
1799 * @gate: boolean set to TRUE to gate, FALSE to ungate
1801 * Gate/ungate the automatic PHY configuration via hardware; perform
1802 * the configuration via software instead.
 * No-op on anything other than pch2lan (82579).
1804 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1808 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
1810 if (hw->mac.type != e1000_pch2lan)
/* Read-modify-write the GATE_PHY_CFG bit in EXTCNF_CTRL. */
1813 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1816 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1818 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1820 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1825 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1826 * @hw: pointer to the HW structure
1828 * Check the appropriate indication the MAC has finished configuring the
1829 * PHY after a software reset.
 * Polls STATUS.LAN_INIT_DONE up to E1000_ICH8_LAN_INIT_TIMEOUT reads,
 * logs a warning on timeout, and clears the bit for the next init event.
1831 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1833 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1835 DEBUGFUNC("e1000_lan_init_done_ich8lan");
1837 /* Wait for basic configuration completes before proceeding */
1839 data = E1000_READ_REG(hw, E1000_STATUS);
1840 data &= E1000_STATUS_LAN_INIT_DONE;
1842 } while ((!data) && --loop);
1845 * If basic configuration is incomplete before the above loop
1846 * count reaches 0, loading the configuration from NVM will
1847 * leave the PHY in a bad state possibly resulting in no link.
1850 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1852 /* Clear the Init Done bit for the next init event */
1853 data = E1000_READ_REG(hw, E1000_STATUS);
1854 data &= ~E1000_STATUS_LAN_INIT_DONE;
1855 E1000_WRITE_REG(hw, E1000_STATUS, data);
1859 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1860 * @hw: pointer to the HW structure
1862 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1864 s32 ret_val = E1000_SUCCESS;
1867 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
1869 if (hw->phy.ops.check_reset_block(hw))
1872 /* Allow time for h/w to get to quiescent state after reset */
1875 /* Perform any necessary post-reset workarounds */
1876 switch (hw->mac.type) {
1878 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1883 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1891 /* Clear the host wakeup bit after lcd reset */
1892 if (hw->mac.type >= e1000_pchlan) {
1893 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, ®);
1894 reg &= ~BM_WUC_HOST_WU_BIT;
1895 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
1898 /* Configure the LCD with the extended configuration region in NVM */
1899 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1903 /* Configure the LCD with the OEM bits in NVM */
1904 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1906 if (hw->mac.type == e1000_pch2lan) {
1907 /* Ungate automatic PHY configuration on non-managed 82579 */
1908 if (!(E1000_READ_REG(hw, E1000_FWSM) &
1909 E1000_ICH_FWSM_FW_VALID)) {
1911 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1914 /* Set EEE LPI Update Timer to 200usec */
1915 ret_val = hw->phy.ops.acquire(hw);
1918 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1919 I82579_LPI_UPDATE_TIMER);
1922 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1925 hw->phy.ops.release(hw);
1933 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1934 * @hw: pointer to the HW structure
1937 * This is a function pointer entry point called by drivers
1938 * or other shared routines.
 *
 * Gates automatic PHY configuration on non-managed 82579, performs the
 * generic PHY hardware reset, then runs the ICH/PCH post-reset sequence.
1940 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1942 s32 ret_val = E1000_SUCCESS;
1944 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1946 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1947 if ((hw->mac.type == e1000_pch2lan) &&
1948 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1949 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
1951 ret_val = e1000_phy_hw_reset_generic(hw);
1955 ret_val = e1000_post_phy_reset_ich8lan(hw);
1962 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1963 * @hw: pointer to the HW structure
1964 * @active: TRUE to enable LPLU, FALSE to disable
1966 * Sets the LPLU state according to the active flag. For PCH, if OEM write
1967 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
1968 * the phy speed. This function will manually set the LPLU bit and restart
1969 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1970 * since it configures the same bit.
1972 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1974 s32 ret_val = E1000_SUCCESS;
1977 DEBUGFUNC("e1000_set_lplu_state_pchlan");
/* Read-modify-write the LPLU bit in HV_OEM_BITS per 'active'. */
1979 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
1984 oem_reg |= HV_OEM_BITS_LPLU;
1986 oem_reg &= ~HV_OEM_BITS_LPLU;
/* Request an auto-neg restart so the new setting takes effect. */
1988 oem_reg |= HV_OEM_BITS_RESTART_AN;
1989 ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
1996 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1997 * @hw: pointer to the HW structure
1998 * @active: TRUE to enable LPLU, FALSE to disable
2000 * Sets the LPLU D0 state according to the active flag. When
2001 * activating LPLU this function also disables smart speed
2002 * and vice versa. LPLU will not be activated unless the
2003 * device autonegotiation advertisement meets standards of
2004 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2005 * This is a function pointer entry point only called by
2006 * PHY setup routines.
2008 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2010 struct e1000_phy_info *phy = &hw->phy;
2012 s32 ret_val = E1000_SUCCESS;
2015 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
/* IFE PHYs do not support this flow. */
2017 if (phy->type == e1000_phy_ife)
2020 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2023 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2024 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2026 if (phy->type != e1000_phy_igp_3)
2030 * Call gig speed drop workaround on LPLU before accessing
 * any PHY registers (ICH8 only).
2033 if (hw->mac.type == e1000_ich8lan)
2034 e1000_gig_downshift_workaround_ich8lan(hw);
2036 /* When LPLU is enabled, we should disable SmartSpeed */
2037 ret_val = phy->ops.read_reg(hw,
2038 IGP01E1000_PHY_PORT_CONFIG,
2040 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2041 ret_val = phy->ops.write_reg(hw,
2042 IGP01E1000_PHY_PORT_CONFIG,
/* Deactivating LPLU: clear the D0A bit ... */
2047 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2048 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2050 if (phy->type != e1000_phy_igp_3)
2054 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2055 * during Dx states where the power conservation is most
2056 * important. During driver activity we should enable
2057 * SmartSpeed, so performance is maintained.
2059 if (phy->smart_speed == e1000_smart_speed_on) {
2060 ret_val = phy->ops.read_reg(hw,
2061 IGP01E1000_PHY_PORT_CONFIG,
2066 data |= IGP01E1000_PSCFR_SMART_SPEED;
2067 ret_val = phy->ops.write_reg(hw,
2068 IGP01E1000_PHY_PORT_CONFIG,
2072 } else if (phy->smart_speed == e1000_smart_speed_off) {
2073 ret_val = phy->ops.read_reg(hw,
2074 IGP01E1000_PHY_PORT_CONFIG,
2079 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2080 ret_val = phy->ops.write_reg(hw,
2081 IGP01E1000_PHY_PORT_CONFIG,
2093 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2094 * @hw: pointer to the HW structure
2095 * @active: TRUE to enable LPLU, FALSE to disable
2097 * Sets the LPLU D3 state according to the active flag. When
2098 * activating LPLU this function also disables smart speed
2099 * and vice versa. LPLU will not be activated unless the
2100 * device autonegotiation advertisement meets standards of
2101 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2102 * This is a function pointer entry point only called by
2103 * PHY setup routines.
2105 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2107 struct e1000_phy_info *phy = &hw->phy;
2109 s32 ret_val = E1000_SUCCESS;
2112 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2114 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Deactivating LPLU: clear NOND0A bit and possibly re-enable SmartSpeed. */
2117 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2118 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2120 if (phy->type != e1000_phy_igp_3)
2124 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2125 * during Dx states where the power conservation is most
2126 * important. During driver activity we should enable
2127 * SmartSpeed, so performance is maintained.
2129 if (phy->smart_speed == e1000_smart_speed_on) {
2130 ret_val = phy->ops.read_reg(hw,
2131 IGP01E1000_PHY_PORT_CONFIG,
2136 data |= IGP01E1000_PSCFR_SMART_SPEED;
2137 ret_val = phy->ops.write_reg(hw,
2138 IGP01E1000_PHY_PORT_CONFIG,
2142 } else if (phy->smart_speed == e1000_smart_speed_off) {
2143 ret_val = phy->ops.read_reg(hw,
2144 IGP01E1000_PHY_PORT_CONFIG,
2149 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2150 ret_val = phy->ops.write_reg(hw,
2151 IGP01E1000_PHY_PORT_CONFIG,
/* Activate LPLU only for advertisement sets that include 10Mb modes. */
2156 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2157 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2158 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2159 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2160 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2162 if (phy->type != e1000_phy_igp_3)
2166 * Call gig speed drop workaround on LPLU before accessing
 * any PHY registers (ICH8 only).
2169 if (hw->mac.type == e1000_ich8lan)
2170 e1000_gig_downshift_workaround_ich8lan(hw);
2172 /* When LPLU is enabled, we should disable SmartSpeed */
2173 ret_val = phy->ops.read_reg(hw,
2174 IGP01E1000_PHY_PORT_CONFIG,
2179 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2180 ret_val = phy->ops.write_reg(hw,
2181 IGP01E1000_PHY_PORT_CONFIG,
2190 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2191 * @hw: pointer to the HW structure
2192 * @bank: pointer to the variable that returns the active bank
2194 * Reads signature byte from the NVM using the flash access registers.
2195 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 * Tries the EECD SEC1VAL indication first (where supported); otherwise
 * reads each bank's signature byte directly from flash.
2197 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2200 struct e1000_nvm_info *nvm = &hw->nvm;
2201 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2202 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2204 s32 ret_val = E1000_SUCCESS;
2206 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2208 switch (hw->mac.type) {
2211 eecd = E1000_READ_REG(hw, E1000_EECD);
2212 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2213 E1000_EECD_SEC1VAL_VALID_MASK) {
2214 if (eecd & E1000_EECD_SEC1VAL)
2221 DEBUGOUT("Unable to determine valid NVM bank via EEC - "
2222 "reading flash signature\n");
2225 /* set bank to 0 in case flash read fails */
/* Check bank 0's signature byte first ... */
2229 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2233 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2234 E1000_ICH_NVM_SIG_VALUE) {
/* ... then bank 1 at bank1_offset. */
2240 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2245 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2246 E1000_ICH_NVM_SIG_VALUE) {
2251 DEBUGOUT("ERROR: No valid NVM bank present\n");
2252 ret_val = -E1000_ERR_NVM;
2260 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2261 * @hw: pointer to the HW structure
2262 * @offset: The offset (in bytes) of the word(s) to read.
2263 * @words: Size of data to read in words
2264 * @data: Pointer to the word(s) to read at offset.
2266 * Reads a word(s) from the NVM using the flash access registers.
 * Modified shadow-RAM entries are returned from the cached copy instead
 * of the flash; otherwise words come from the currently-valid bank.
2268 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2271 struct e1000_nvm_info *nvm = &hw->nvm;
2272 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2274 s32 ret_val = E1000_SUCCESS;
2278 DEBUGFUNC("e1000_read_nvm_ich8lan");
2280 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2282 DEBUGOUT("nvm parameter(s) out of bounds\n");
2283 ret_val = -E1000_ERR_NVM;
2287 nvm->ops.acquire(hw);
2289 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2290 if (ret_val != E1000_SUCCESS) {
2291 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Bank 1 starts one flash_bank_size past bank 0. */
2295 act_offset = (bank) ? nvm->flash_bank_size : 0;
2296 act_offset += offset;
2298 ret_val = E1000_SUCCESS;
2299 for (i = 0; i < words; i++) {
/* Prefer unflushed shadow-RAM modifications over flash contents. */
2300 if ((dev_spec->shadow_ram) &&
2301 (dev_spec->shadow_ram[offset+i].modified)) {
2302 data[i] = dev_spec->shadow_ram[offset+i].value;
2304 ret_val = e1000_read_flash_word_ich8lan(hw,
2313 nvm->ops.release(hw);
2317 DEBUGOUT1("NVM read error: %d\n", ret_val);
2323 * e1000_flash_cycle_init_ich8lan - Initialize flash
2324 * @hw: pointer to the HW structure
2326 * This function does initial flash setup so that a new read/write/erase cycle
 * can begin: validates the flash descriptor, clears stale error bits, and
 * waits for any in-progress cycle to finish before setting FDONE.
2329 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2331 union ich8_hws_flash_status hsfsts;
2332 s32 ret_val = -E1000_ERR_NVM;
2334 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2336 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2338 /* Check if the flash descriptor is valid */
2339 if (hsfsts.hsf_status.fldesvalid == 0) {
2340 DEBUGOUT("Flash descriptor invalid. "
2341 "SW Sequencing must be used.");
2345 /* Clear FCERR and DAEL in hw status by writing 1 */
2346 hsfsts.hsf_status.flcerr = 1;
2347 hsfsts.hsf_status.dael = 1;
2349 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2352 * Either we should have a hardware SPI cycle in progress
2353 * bit to check against, in order to start a new cycle or
2354 * FDONE bit should be changed in the hardware so that it
2355 * is 1 after hardware reset, which can then be used as an
2356 * indication whether a cycle is in progress or has been
 * completed.
2360 if (hsfsts.hsf_status.flcinprog == 0) {
2362 * There is no cycle running at present,
2363 * so we can start a cycle.
2364 * Begin by setting Flash Cycle Done.
2366 hsfsts.hsf_status.flcdone = 1;
2367 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2368 ret_val = E1000_SUCCESS;
2373 * Otherwise poll for sometime so the current
2374 * cycle has a chance to end before giving up.
2376 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2377 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2379 if (hsfsts.hsf_status.flcinprog == 0) {
2380 ret_val = E1000_SUCCESS;
2385 if (ret_val == E1000_SUCCESS) {
2387 * Successful in waiting for previous cycle to timeout,
2388 * now set the Flash Cycle Done.
2390 hsfsts.hsf_status.flcdone = 1;
2391 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2394 DEBUGOUT("Flash controller busy, cannot get access");
2403 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2404 * @hw: pointer to the HW structure
2405 * @timeout: maximum time to wait for completion
2407 * This function starts a flash cycle and waits for its completion.
 * Returns E1000_SUCCESS only when FDONE is set with no flash cycle error.
2409 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2411 union ich8_hws_flash_ctrl hsflctl;
2412 union ich8_hws_flash_status hsfsts;
2413 s32 ret_val = -E1000_ERR_NVM;
2416 DEBUGFUNC("e1000_flash_cycle_ich8lan");
2418 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2419 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2420 hsflctl.hsf_ctrl.flcgo = 1;
2421 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2423 /* wait till FDONE bit is set to 1 */
2425 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2426 if (hsfsts.hsf_status.flcdone == 1)
2429 } while (i++ < timeout);
/* Success requires completion without the flash-cycle-error flag. */
2431 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2432 ret_val = E1000_SUCCESS;
2438 * e1000_read_flash_word_ich8lan - Read word from flash
2439 * @hw: pointer to the HW structure
2440 * @offset: offset to data location
2441 * @data: pointer to the location for storing the data
2443 * Reads the flash word at offset into data. Offset is converted
2444 * to bytes before read.
2446 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2451 DEBUGFUNC("e1000_read_flash_word_ich8lan");
/* Out-of-range offsets are rejected as an NVM error. */
2454 ret_val = -E1000_ERR_NVM;
2458 /* Must convert offset into bytes. */
/* Delegate the 2-byte access to the common flash-data reader. */
2461 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2468 * e1000_read_flash_byte_ich8lan - Read byte from flash
2469 * @hw: pointer to the HW structure
2470 * @offset: The offset of the byte to read.
2471 * @data: Pointer to a byte to store the value read.
2473 * Reads a single byte from the NVM using the flash access registers.
 * Thin wrapper over e1000_read_flash_data_ich8lan with size 1.
2475 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2478 s32 ret_val = E1000_SUCCESS;
2481 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2492 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2493 * @hw: pointer to the HW structure
2494 * @offset: The offset (in bytes) of the byte or word to read.
2495 * @size: Size of data to read, 1=byte 2=word
2496 * @data: Pointer to the word to store the value read.
2498 * Reads a byte or word from the NVM using the flash access registers.
 * Retries the whole init/cycle sequence up to ICH_FLASH_CYCLE_REPEAT_COUNT
 * times when the controller reports a flash cycle error.
2500 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2503 union ich8_hws_flash_status hsfsts;
2504 union ich8_hws_flash_ctrl hsflctl;
2505 u32 flash_linear_addr;
2507 s32 ret_val = -E1000_ERR_NVM;
2510 DEBUGFUNC("e1000_read_flash_data_ich8lan");
2512 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
/* Translate the NVM offset into a linear flash address. */
2515 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2516 hw->nvm.flash_base_addr;
2521 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2522 if (ret_val != E1000_SUCCESS)
2525 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2526 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2527 hsflctl.hsf_ctrl.fldbcount = size - 1;
2528 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2529 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2531 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2533 ret_val = e1000_flash_cycle_ich8lan(hw,
2534 ICH_FLASH_READ_COMMAND_TIMEOUT);
2537 * Check if FCERR is set to 1, if set to 1, clear it
2538 * and try the whole sequence a few more times, else
2539 * read in (shift in) the Flash Data0, the order is
2540 * least significant byte first msb to lsb
2542 if (ret_val == E1000_SUCCESS) {
2543 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2545 *data = (u8)(flash_data & 0x000000FF);
2547 *data = (u16)(flash_data & 0x0000FFFF);
2551 * If we've gotten here, then things are probably
2552 * completely hosed, but if the error condition is
2553 * detected, it won't hurt to give it another try...
2554 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2556 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2558 if (hsfsts.hsf_status.flcerr == 1) {
2559 /* Repeat for some time before giving up. */
2561 } else if (hsfsts.hsf_status.flcdone == 0) {
2562 DEBUGOUT("Timeout error - flash cycle "
2563 "did not complete.");
2567 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2574 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2575 * @hw: pointer to the HW structure
2576 * @offset: The offset (in bytes) of the word(s) to write.
2577 * @words: Size of data to write in words
2578 * @data: Pointer to the word(s) to write at offset.
2580 * Writes a byte or word to the NVM using the flash access registers.
2582 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2585 struct e1000_nvm_info *nvm = &hw->nvm;
2586 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2587 s32 ret_val = E1000_SUCCESS;
2590 DEBUGFUNC("e1000_write_nvm_ich8lan");
/* Bounds-check: offset must lie inside the NVM and the run of words
 * must not extend past its end.
 */
2592 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2594 DEBUGOUT("nvm parameter(s) out of bounds\n");
2595 ret_val = -E1000_ERR_NVM;
2599 nvm->ops.acquire(hw);
/* Only the in-memory shadow RAM is updated here; the words are committed
 * to flash later by e1000_update_nvm_checksum_ich8lan().
 */
2601 for (i = 0; i < words; i++) {
2602 dev_spec->shadow_ram[offset+i].modified = TRUE;
2603 dev_spec->shadow_ram[offset+i].value = data[i];
2606 nvm->ops.release(hw);
2613 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2614 * @hw: pointer to the HW structure
2616 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2617 * which writes the checksum to the shadow ram. The changes in the shadow
2618 * ram are then committed to the EEPROM by processing each bank at a time
2619 * checking for the modified bit and writing only the pending changes.
2620 * After a successful commit, the shadow ram is cleared and is ready for
2623 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2625 struct e1000_nvm_info *nvm = &hw->nvm;
2626 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2627 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2631 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
/* Recompute/store the checksum word in shadow RAM first. */
2633 ret_val = e1000_update_nvm_checksum_generic(hw);
/* The bank-swap commit below only applies to software-managed flash. */
2637 if (nvm->type != e1000_nvm_flash_sw)
2640 nvm->ops.acquire(hw);
2643 * We're writing to the opposite bank so if we're on bank 1,
2644 * write to bank 0 etc. We also need to erase the segment that
2645 * is going to be written
2647 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2648 if (ret_val != E1000_SUCCESS) {
2649 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Valid bank 0 -> erase and target bank 1 (and vice versa). */
2654 new_bank_offset = nvm->flash_bank_size;
2655 old_bank_offset = 0;
2656 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2660 old_bank_offset = nvm->flash_bank_size;
2661 new_bank_offset = 0;
2662 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2667 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2669 * Determine whether to write the value stored
2670 * in the other NVM bank or a modified value stored
2673 if (dev_spec->shadow_ram[i].modified) {
2674 data = dev_spec->shadow_ram[i].value;
/* Unmodified word: copy it over from the currently-valid bank. */
2676 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2684 * If the word is 0x13, then make sure the signature bits
2685 * (15:14) are 11b until the commit has completed.
2686 * This will allow us to write 10b which indicates the
2687 * signature is valid. We want to do this after the write
2688 * has completed so that we don't mark the segment valid
2689 * while the write is still in progress
2691 if (i == E1000_ICH_NVM_SIG_WORD)
2692 data |= E1000_ICH_NVM_SIG_MASK;
2694 /* Convert offset to bytes. */
2695 act_offset = (i + new_bank_offset) << 1;
2698 /* Write the bytes to the new bank. */
2699 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2706 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2714 * Don't bother writing the segment valid bits if sector
2715 * programming failed.
2718 DEBUGOUT("Flash commit failed.\n");
2723 * Finally validate the new segment by setting bit 15:14
2724 * to 10b in word 0x13 , this can be done without an
2725 * erase as well since these bits are 11 to start with
2726 * and we need to change bit 14 to 0b
2728 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2729 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2734 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2741 * And invalidate the previously valid segment by setting
2742 * its signature word (0x13) high_byte to 0b. This can be
2743 * done without an erase because flash erase sets all bits
2744 * to 1's. We can write 1's to 0's without an erase
2746 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2747 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2751 /* Great! Everything worked, we can now clear the cached entries. */
2752 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2753 dev_spec->shadow_ram[i].modified = FALSE;
2754 dev_spec->shadow_ram[i].value = 0xFFFF;
2758 nvm->ops.release(hw);
2761 * Reload the EEPROM, or else modifications will not appear
2762 * until after the next adapter reset.
2765 nvm->ops.reload(hw);
2771 DEBUGOUT1("NVM update error: %d\n", ret_val);
2777 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2778 * @hw: pointer to the HW structure
2780 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2781 * If the bit is 0, then the EEPROM had been modified, but the checksum was not
2782 * calculated, in which case we need to calculate the checksum and set bit 6.
2784 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2786 s32 ret_val = E1000_SUCCESS;
2789 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2792 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2793 * needs to be fixed. This bit is an indication that the NVM
2794 * was prepared by OEM software and did not calculate the
2795 * checksum...a likely scenario.
2797 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
/* Bit 6 (0x40) clear -> set it, write the word back, and commit so the
 * generic validation below sees a consistent checksum.
 */
2801 if ((data & 0x40) == 0) {
2803 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2806 ret_val = hw->nvm.ops.update(hw);
2811 ret_val = e1000_validate_nvm_checksum_generic(hw);
2818 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2819 * @hw: pointer to the HW structure
2820 * @offset: The offset (in bytes) of the byte/word to write.
2821 * @size: Size of data to write, 1=byte 2=word
2822 * @data: The byte(s) to write to the NVM.
2824 * Writes one/two bytes to the NVM using the flash access registers.
2826 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2829 union ich8_hws_flash_status hsfsts;
2830 union ich8_hws_flash_ctrl hsflctl;
2831 u32 flash_linear_addr;
2833 s32 ret_val = -E1000_ERR_NVM;
2836 DEBUGFUNC("e1000_write_ich8_data");
/* Validate width (1 or 2 bytes), that the data value fits that width
 * (0xff for a byte, 0x1fe upper bound check via size * 0xff for a word),
 * and that the offset fits the FADDR linear address field.
 */
2838 if (size < 1 || size > 2 || data > size * 0xff ||
2839 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2842 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2843 hw->nvm.flash_base_addr;
/* Each attempt: re-arm the controller, program size and the WRITE cycle,
 * latch address and data, then start the cycle.
 */
2848 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2849 if (ret_val != E1000_SUCCESS)
2852 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2853 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2854 hsflctl.hsf_ctrl.fldbcount = size - 1;
2855 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2856 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2858 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2861 flash_data = (u32)data & 0x00FF;
2863 flash_data = (u32)data;
2865 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2868 * check if FCERR is set to 1 , if set to 1, clear it
2869 * and try the whole sequence a few more times else done
2871 ret_val = e1000_flash_cycle_ich8lan(hw,
2872 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2873 if (ret_val == E1000_SUCCESS)
2877 * If we're here, then things are most likely
2878 * completely hosed, but if the error condition
2879 * is detected, it won't hurt to give it another
2880 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2882 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2883 if (hsfsts.hsf_status.flcerr == 1)
2884 /* Repeat for some time before giving up. */
2886 if (hsfsts.hsf_status.flcdone == 0) {
2887 DEBUGOUT("Timeout error - flash cycle "
2888 "did not complete.");
2891 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2898 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2899 * @hw: pointer to the HW structure
2900 * @offset: The index of the byte to write.
2901 * @data: The byte to write to the NVM.
2903 * Writes a single byte to the NVM using the flash access registers.
2905 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Widen to u16 because the common writer takes word-sized data. */
2908 u16 word = (u16)data;
2910 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2912 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2916 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2917 * @hw: pointer to the HW structure
2918 * @offset: The offset of the byte to write.
2919 * @byte: The byte to write to the NVM.
2921 * Writes a single byte to the NVM using the flash access registers.
2922 * Goes through a retry algorithm before giving up.
2924 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2925 u32 offset, u8 byte)
2928 u16 program_retries;
2930 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
/* First attempt outside the retry loop; success returns immediately. */
2932 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2933 if (ret_val == E1000_SUCCESS)
/* Retry up to 100 times; a successful write exits the loop early. */
2936 for (program_retries = 0; program_retries < 100; program_retries++) {
2937 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2939 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2940 if (ret_val == E1000_SUCCESS)
/* Loop ran to completion without success -> report NVM error. */
2943 if (program_retries == 100) {
2944 ret_val = -E1000_ERR_NVM;
2953 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2954 * @hw: pointer to the HW structure
2955 * @bank: 0 for first bank, 1 for second bank, etc.
2957 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2958 * bank N is 4096 * N + flash_reg_addr.
2960 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2962 struct e1000_nvm_info *nvm = &hw->nvm;
2963 union ich8_hws_flash_status hsfsts;
2964 union ich8_hws_flash_ctrl hsflctl;
2965 u32 flash_linear_addr;
2966 /* bank size is in 16bit words - adjust to bytes */
2967 u32 flash_bank_size = nvm->flash_bank_size * 2;
2968 s32 ret_val = E1000_SUCCESS;
2970 s32 j, iteration, sector_size;
2972 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2974 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2977 * Determine HW Sector size: Read BERASE bits of hw flash status
2979 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2980 * consecutive sectors. The start index for the nth Hw sector
2981 * can be calculated as = bank * 4096 + n * 256
2982 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2983 * The start index for the nth Hw sector can be calculated
2985 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2986 * (ich9 only, otherwise error condition)
2987 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2989 switch (hsfsts.hsf_status.berasesz) {
2991 /* Hw sector size 256 */
2992 sector_size = ICH_FLASH_SEG_SIZE_256;
2993 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2996 sector_size = ICH_FLASH_SEG_SIZE_4K;
3000 sector_size = ICH_FLASH_SEG_SIZE_8K;
3004 sector_size = ICH_FLASH_SEG_SIZE_64K;
/* Unrecognized BERASE encoding -> NVM error. */
3008 ret_val = -E1000_ERR_NVM;
3012 /* Start with the base address, then add the sector offset. */
3013 flash_linear_addr = hw->nvm.flash_base_addr;
3014 flash_linear_addr += (bank) ? flash_bank_size : 0;
3016 for (j = 0; j < iteration ; j++) {
3019 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3024 * Write a value 11 (block Erase) in Flash
3025 * Cycle field in hw flash control
3027 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3029 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3030 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3034 * Write the last 24 bits of an index within the
3035 * block into Flash Linear address field in Flash
/* NOTE(review): flash_linear_addr is incremented cumulatively here
 * rather than recomputed from the base each iteration; this sits
 * inside the retry loop, so verify retries target the intended
 * sector address.
 */
3038 flash_linear_addr += (j * sector_size);
3039 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3042 ret_val = e1000_flash_cycle_ich8lan(hw,
3043 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3044 if (ret_val == E1000_SUCCESS)
3048 * Check if FCERR is set to 1. If 1,
3049 * clear it and try the whole sequence
3050 * a few more times else Done
3052 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3054 if (hsfsts.hsf_status.flcerr == 1)
3055 /* repeat for some time before giving up */
3057 else if (hsfsts.hsf_status.flcdone == 0)
3059 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3067 * e1000_valid_led_default_ich8lan - Set the default LED settings
3068 * @hw: pointer to the HW structure
3069 * @data: Pointer to the LED settings
3071 * Reads the LED default settings from the NVM to data. If the NVM LED
3072 * settings is all 0's or F's, set the LED default to a valid LED default
3075 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3079 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3081 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3083 DEBUGOUT("NVM Read Error\n");
/* All-zeros / all-ones are reserved sentinels: substitute the
 * ICH8LAN default LED configuration.
 */
3087 if (*data == ID_LED_RESERVED_0000 ||
3088 *data == ID_LED_RESERVED_FFFF)
3089 *data = ID_LED_DEFAULT_ICH8LAN;
3096 * e1000_id_led_init_pchlan - store LED configurations
3097 * @hw: pointer to the HW structure
3099 * PCH does not control LEDs via the LEDCTL register, rather it uses
3100 * the PHY LED configuration register.
3102 * PCH also does not have an "always on" or "always off" mode which
3103 * complicates the ID feature. Instead of using the "on" mode to indicate
3104 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3105 * use "link_up" mode. The LEDs will still ID on request if there is no
3106 * link based on logic in e1000_led_[on|off]_pchlan().
3108 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3110 struct e1000_mac_info *mac = &hw->mac;
/* "on" = link-up mode; "off" = link-up mode with the invert bit set. */
3112 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3113 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3114 u16 data, i, temp, shift;
3116 DEBUGFUNC("e1000_id_led_init_pchlan");
3118 /* Get default ID LED modes */
3119 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3123 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3124 mac->ledctl_mode1 = mac->ledctl_default;
3125 mac->ledctl_mode2 = mac->ledctl_default;
/* Four LED config nibbles in the NVM word; i << 2 selects each nibble. */
3127 for (i = 0; i < 4; i++) {
3128 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
/* mode1: LED behavior when identification is active (state 1). */
3131 case ID_LED_ON1_DEF2:
3132 case ID_LED_ON1_ON2:
3133 case ID_LED_ON1_OFF2:
3134 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3135 mac->ledctl_mode1 |= (ledctl_on << shift);
3137 case ID_LED_OFF1_DEF2:
3138 case ID_LED_OFF1_ON2:
3139 case ID_LED_OFF1_OFF2:
3140 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3141 mac->ledctl_mode1 |= (ledctl_off << shift);
/* mode2: LED behavior for the second identification state. */
3148 case ID_LED_DEF1_ON2:
3149 case ID_LED_ON1_ON2:
3150 case ID_LED_OFF1_ON2:
3151 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3152 mac->ledctl_mode2 |= (ledctl_on << shift);
3154 case ID_LED_DEF1_OFF2:
3155 case ID_LED_ON1_OFF2:
3156 case ID_LED_OFF1_OFF2:
3157 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3158 mac->ledctl_mode2 |= (ledctl_off << shift);
3171 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3172 * @hw: pointer to the HW structure
3174 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3175 * register, so the bus width is hard coded.
3177 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3179 struct e1000_bus_info *bus = &hw->bus;
3182 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3184 ret_val = e1000_get_bus_info_pcie_generic(hw);
3187 * ICH devices are "PCI Express"-ish. They have
3188 * a configuration space, but do not contain
3189 * PCI Express Capability registers, so bus width
3190 * must be hardcoded.
3192 if (bus->width == e1000_bus_width_unknown)
3193 bus->width = e1000_bus_width_pcie_x1;
3199 * e1000_reset_hw_ich8lan - Reset the hardware
3200 * @hw: pointer to the HW structure
3202 * Does a full reset of the hardware which includes a reset of the PHY and
3205 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3207 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3212 DEBUGFUNC("e1000_reset_hw_ich8lan");
3215 * Prevent the PCI-E bus from sticking if there is no TLP connection
3216 * on the last TLP read/write transaction when MAC is reset.
3218 ret_val = e1000_disable_pcie_master_generic(hw);
3220 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3222 DEBUGOUT("Masking off all interrupts\n");
3223 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3226 * Disable the Transmit and Receive units. Then delay to allow
3227 * any pending transactions to complete before we hit the MAC
3228 * with the global reset.
3230 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3231 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3232 E1000_WRITE_FLUSH(hw);
3236 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3237 if (hw->mac.type == e1000_ich8lan) {
3238 /* Set Tx and Rx buffer allocation to 8k apiece. */
3239 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3240 /* Set Packet Buffer Size to 16k. */
3241 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3244 if (hw->mac.type == e1000_pchlan) {
3245 /* Save the NVM K1 bit setting*/
/* Fixed: the address-of argument had been corrupted to the literal
 * character '®' (mangled "&reg"); restore &reg so the NVM word is
 * stored into the local 'reg' tested just below.
 */
3246 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3250 if (reg & E1000_NVM_K1_ENABLE)
3251 dev_spec->nvm_k1_enabled = TRUE;
3253 dev_spec->nvm_k1_enabled = FALSE;
3256 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3258 if (!hw->phy.ops.check_reset_block(hw)) {
3260 * Full-chip reset requires MAC and PHY reset at the same
3261 * time to make sure the interface between MAC and the
3262 * external PHY is reset.
3264 ctrl |= E1000_CTRL_PHY_RST;
3267 * Gate automatic PHY configuration by hardware on
3270 if ((hw->mac.type == e1000_pch2lan) &&
3271 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3272 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3274 ret_val = e1000_acquire_swflag_ich8lan(hw);
3275 DEBUGOUT("Issuing a global reset to ich8lan\n");
3276 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
/* If the PHY was reset together with the MAC, wait for its config to
 * complete, then run the post-PHY-reset fixups.
 */
3279 if (ctrl & E1000_CTRL_PHY_RST) {
3280 ret_val = hw->phy.ops.get_cfg_done(hw);
3284 ret_val = e1000_post_phy_reset_ich8lan(hw);
3290 * For PCH, this write will make sure that any noise
3291 * will be detected as a CRC error and be dropped rather than show up
3292 * as a bad packet to the DMA engine.
3294 if (hw->mac.type == e1000_pchlan)
3295 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Re-mask interrupts and clear any that are pending post-reset. */
3297 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3298 E1000_READ_REG(hw, E1000_ICR);
3300 kab = E1000_READ_REG(hw, E1000_KABGTXD);
3301 kab |= E1000_KABGTXD_BGSQLBIAS;
3302 E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3309 * e1000_init_hw_ich8lan - Initialize the hardware
3310 * @hw: pointer to the HW structure
3312 * Prepares the hardware for transmit and receive by doing the following:
3313 * - initialize hardware bits
3314 * - initialize LED identification
3315 * - setup receive address registers
3316 * - setup flow control
3317 * - setup transmit descriptors
3318 * - clear statistics
3320 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3322 struct e1000_mac_info *mac = &hw->mac;
3323 u32 ctrl_ext, txdctl, snoop;
3327 DEBUGFUNC("e1000_init_hw_ich8lan");
3329 e1000_initialize_hw_bits_ich8lan(hw);
3331 /* Initialize identification LED */
3332 ret_val = mac->ops.id_led_init(hw);
3334 DEBUGOUT("Error initializing identification LED\n");
3335 /* This is not fatal and we should not stop init due to this */
3337 /* Setup the receive address. */
3338 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3340 /* Zero out the Multicast HASH table */
3341 DEBUGOUT("Zeroing the MTA\n");
3342 for (i = 0; i < mac->mta_reg_count; i++)
3343 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3346 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3347 * the ME. Disable wakeup by clearing the host wakeup bit.
3348 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3350 if (hw->phy.type == e1000_phy_82578) {
3351 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3352 i &= ~BM_WUC_HOST_WU_BIT;
3353 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3354 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3359 /* Setup link and flow control */
3360 ret_val = mac->ops.setup_link(hw);
3362 /* Set the transmit descriptor write-back policy for both queues */
3363 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3364 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3365 E1000_TXDCTL_FULL_TX_DESC_WB;
3366 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3367 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3368 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/* Same write-back/prefetch policy for the second Tx queue. */
3369 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3370 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3371 E1000_TXDCTL_FULL_TX_DESC_WB;
3372 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3373 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3374 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3377 * ICH8 has opposite polarity of no_snoop bits.
3378 * By default, we should use snoop behavior.
3380 if (mac->type == e1000_ich8lan)
3381 snoop = PCIE_ICH8_SNOOP_ALL;
3383 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3384 e1000_set_pcie_no_snoop_generic(hw, snoop);
/* Disable relaxed ordering on PCIe completions. */
3386 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3387 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3388 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3391 * Clear all of the statistics registers (clear on read). It is
3392 * important that we do this after we have tried to establish link
3393 * because the symbol error count will increment wildly if there
3396 e1000_clear_hw_cntrs_ich8lan(hw);
3401 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3402 * @hw: pointer to the HW structure
3404 * Sets/Clears required hardware bits necessary for correctly setting up the
3405 * hardware for transmit and receive.
3407 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3411 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3413 /* Extended Device Control */
3414 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3416 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3417 if (hw->mac.type >= e1000_pchlan)
3418 reg |= E1000_CTRL_EXT_PHYPDEN;
3419 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3421 /* Transmit Descriptor Control 0 */
3422 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3424 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3426 /* Transmit Descriptor Control 1 */
3427 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3429 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3431 /* Transmit Arbitration Control 0 */
3432 reg = E1000_READ_REG(hw, E1000_TARC(0));
/* Extra TARC0 bits (28, 29) are required on ICH8 only. */
3433 if (hw->mac.type == e1000_ich8lan)
3434 reg |= (1 << 28) | (1 << 29);
3435 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3436 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3438 /* Transmit Arbitration Control 1 */
3439 reg = E1000_READ_REG(hw, E1000_TARC(1));
/* TARC1 bits depend on whether multiple Tx requests (MULR) is enabled. */
3440 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3444 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3445 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3448 if (hw->mac.type == e1000_ich8lan) {
3449 reg = E1000_READ_REG(hw, E1000_STATUS);
3451 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3455 * work-around descriptor data corruption issue during nfs v2 udp
3456 * traffic, just disable the nfs filtering capability
3458 reg = E1000_READ_REG(hw, E1000_RFCTL);
3459 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3460 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3466 * e1000_setup_link_ich8lan - Setup flow control and link settings
3467 * @hw: pointer to the HW structure
3469 * Determines which flow control settings to use, then configures flow
3470 * control. Calls the appropriate media-specific link configuration
3471 * function. Assuming the adapter has a valid link partner, a valid link
3472 * should be established. Assumes the hardware has previously been reset
3473 * and the transmitter and receiver are not enabled.
3475 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3477 s32 ret_val = E1000_SUCCESS;
3479 DEBUGFUNC("e1000_setup_link_ich8lan");
/* Do nothing while a manageability/firmware reset block is active. */
3481 if (hw->phy.ops.check_reset_block(hw))
3485 * ICH parts do not have a word in the NVM to determine
3486 * the default flow control setting, so we explicitly
3489 if (hw->fc.requested_mode == e1000_fc_default)
3490 hw->fc.requested_mode = e1000_fc_full;
3493 * Save off the requested flow control mode for use later. Depending
3494 * on the link partner's capabilities, we may or may not use this mode.
3496 hw->fc.current_mode = hw->fc.requested_mode;
3498 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3499 hw->fc.current_mode);
3501 /* Continue to configure the copper link. */
3502 ret_val = hw->mac.ops.setup_physical_interface(hw);
3506 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
/* 82577/8/9 PHYs need the flow-control refresh timer and a PHY-side
 * copy of the pause timer value as well.
 */
3507 if ((hw->phy.type == e1000_phy_82578) ||
3508 (hw->phy.type == e1000_phy_82579) ||
3509 (hw->phy.type == e1000_phy_82577)) {
3510 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3512 ret_val = hw->phy.ops.write_reg(hw,
3513 PHY_REG(BM_PORT_CTRL_PAGE, 27),
3519 ret_val = e1000_set_fc_watermarks_generic(hw);
3526 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3527 * @hw: pointer to the HW structure
3529 * Configures the kumeran interface to the PHY to wait the appropriate time
3530 * when polling the PHY, then call the generic setup_copper_link to finish
3531 * configuring the copper link.
3533 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3539 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Set link up and let speed/duplex come from auto-negotiation
 * (clear the force-speed/force-duplex bits).
 */
3541 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3542 ctrl |= E1000_CTRL_SLU;
3543 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3544 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3547 * Set the mac to wait the maximum time between each iteration
3548 * and increase the max iterations when polling the phy;
3549 * this fixes erroneous timeouts at 10Mbps.
3551 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3555 ret_val = e1000_read_kmrn_reg_generic(hw,
3556 E1000_KMRNCTRLSTA_INBAND_PARAM,
3561 ret_val = e1000_write_kmrn_reg_generic(hw,
3562 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* Dispatch to the PHY-family-specific copper link setup. */
3567 switch (hw->phy.type) {
3568 case e1000_phy_igp_3:
3569 ret_val = e1000_copper_link_setup_igp(hw);
3574 case e1000_phy_82578:
3575 ret_val = e1000_copper_link_setup_m88(hw);
3579 case e1000_phy_82577:
3580 case e1000_phy_82579:
3581 ret_val = e1000_copper_link_setup_82577(hw);
/* IFE PHY: program the MDI/MDI-X mode from hw->phy.mdix. */
3586 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3591 reg_data &= ~IFE_PMC_AUTO_MDIX;
3593 switch (hw->phy.mdix) {
3595 reg_data &= ~IFE_PMC_FORCE_MDIX;
3598 reg_data |= IFE_PMC_FORCE_MDIX;
3602 reg_data |= IFE_PMC_AUTO_MDIX;
3605 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3613 ret_val = e1000_setup_copper_link_generic(hw);
3620 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3621 * @hw: pointer to the HW structure
3622 * @speed: pointer to store current link speed
3623 * @duplex: pointer to store the current link duplex
3625 * Calls the generic get_speed_and_duplex to retrieve the current link
3626 * information and then calls the Kumeran lock loss workaround for links at
3629 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3634 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3636 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* Workaround is only needed for ICH8 + IGP3 PHY at gigabit speed. */
3640 if ((hw->mac.type == e1000_ich8lan) &&
3641 (hw->phy.type == e1000_phy_igp_3) &&
3642 (*speed == SPEED_1000)) {
3643 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3651 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3652 * @hw: pointer to the HW structure
3654 * Work-around for 82566 Kumeran PCS lock loss:
3655 * On link status change (i.e. PCI reset, speed change) and link is up and
3657 * 0) if workaround is optionally disabled do nothing
3658 * 1) wait 1ms for Kumeran link to come up
3659 * 2) check Kumeran Diagnostic register PCS lock loss bit
3660 * 3) if not set the link is locked (all is good), otherwise...
3662 * 5) repeat up to 10 times
3663 * Note: this is only called for IGP3 copper when speed is 1gb.
3665 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3667 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3669 s32 ret_val = E1000_SUCCESS;
3673 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3675 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3679 * Make sure link is up before proceeding. If not just return.
3680 * Attempting this while link is negotiating fouled up link
3683 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
/* No link is not an error for this workaround - report success. */
3685 ret_val = E1000_SUCCESS;
3689 for (i = 0; i < 10; i++) {
3690 /* read once to clear */
3691 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3694 /* and again to get new status */
3695 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3699 /* check for PCS lock */
3700 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3701 ret_val = E1000_SUCCESS;
3705 /* Issue PHY reset */
3706 hw->phy.ops.reset(hw);
/* All 10 attempts failed: force gigabit off as a last resort. */
3709 /* Disable GigE link negotiation */
3710 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3711 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3712 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3713 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3716 * Call gig speed drop workaround on Gig disable before accessing
3719 e1000_gig_downshift_workaround_ich8lan(hw);
3721 /* unable to acquire PCS lock */
3722 ret_val = -E1000_ERR_PHY;
3729 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3730 * @hw: pointer to the HW structure
3731 * @state: boolean value used to set the current Kumeran workaround state
3733 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
3734 * /disabled - FALSE).
3736 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3739 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3741 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* Silently refuse on non-ICH8 parts; the erratum is ICH8-specific. */
3743 if (hw->mac.type != e1000_ich8lan) {
3744 DEBUGOUT("Workaround applies to ICH8 only.\n");
3748 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3754 * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3755 * @hw: pointer to the HW structure
3757 * Workaround for 82566 power-down on D3 entry:
3758 * 1) disable gigabit link
3759 * 2) write VR power-down enable
3761 * Continue if successful, else issue LCD reset and repeat
3763 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3769 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* Only the IGP3 PHY is affected by this erratum. */
3771 if (hw->phy.type != e1000_phy_igp_3)
3774 /* Try the workaround twice (if needed) */
3777 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3778 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3779 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3780 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3783 * Call gig speed drop workaround on Gig disable before
3784 * accessing any PHY registers
3786 if (hw->mac.type == e1000_ich8lan)
3787 e1000_gig_downshift_workaround_ich8lan(hw);
3789 /* Write VR power-down enable */
3790 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data)
3791 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3792 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3793 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3795 /* Read it back and test */
3796 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3797 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
/* Success (shutdown mode latched) or second pass already done -> stop. */
3798 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3801 /* Issue PHY reset and repeat at most one more time */
3802 reg = E1000_READ_REG(hw, E1000_CTRL);
3803 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3812 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3813 * @hw: pointer to the HW structure
3815 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3816 * LPLU, Gig disable, MDIC PHY reset):
3817 * 1) Set Kumeran Near-end loopback
3818 * 2) Clear Kumeran Near-end loopback
3819 * Should only be called for ICH8[m] devices with IGP_3 Phy.
3821 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3823 s32 ret_val = E1000_SUCCESS;
3826 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3828 if ((hw->mac.type != e1000_ich8lan) ||
3829 (hw->phy.type != e1000_phy_igp_3))
3832 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3836 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3837 ret_val = e1000_write_kmrn_reg_generic(hw,
3838 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3842 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3843 ret_val = e1000_write_kmrn_reg_generic(hw,
3844 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3851 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3852 * @hw: pointer to the HW structure
3854 * During S0 to Sx transition, it is possible the link remains at gig
3855 * instead of negotiating to a lower speed. Before going to Sx, set
3856 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3857 * to a lower speed. For PCH and newer parts, the OEM bits PHY register
3858 * (LED, GbE disable and LPLU configurations) also needs to be written.
3860 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3865 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
3867 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3868 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3869 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3871 if (hw->mac.type >= e1000_pchlan) {
3872 e1000_oem_bits_config_ich8lan(hw, FALSE);
3873 ret_val = hw->phy.ops.acquire(hw);
3876 e1000_write_smbus_addr(hw);
3877 hw->phy.ops.release(hw);
3884 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
3885 * @hw: pointer to the HW structure
3887 * During Sx to S0 transitions on non-managed devices or managed devices
3888 * on which PHY resets are not blocked, if the PHY registers cannot be
3889 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
3892 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3896 DEBUGFUNC("e1000_resume_workarounds_pchlan");
3898 if (hw->mac.type != e1000_pch2lan)
3901 fwsm = E1000_READ_REG(hw, E1000_FWSM);
3902 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) ||
3903 !hw->phy.ops.check_reset_block(hw)) {
3904 u16 phy_id1, phy_id2;
3907 ret_val = hw->phy.ops.acquire(hw);
3909 DEBUGOUT("Failed to acquire PHY semaphore in resume\n");
3913 /* Test access to the PHY registers by reading the ID regs */
3914 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
3917 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
3921 if (hw->phy.id == ((u32)(phy_id1 << 16) |
3922 (u32)(phy_id2 & PHY_REVISION_MASK)))
3925 e1000_toggle_lanphypc_value_ich8lan(hw);
3927 hw->phy.ops.release(hw);
3929 hw->phy.ops.reset(hw);
3935 hw->phy.ops.release(hw);
3941 * e1000_cleanup_led_ich8lan - Restore the default LED operation
3942 * @hw: pointer to the HW structure
3944 * Return the LED back to the default configuration.
3946 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3948 DEBUGFUNC("e1000_cleanup_led_ich8lan");
3950 if (hw->phy.type == e1000_phy_ife)
3951 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3954 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3955 return E1000_SUCCESS;
3959 * e1000_led_on_ich8lan - Turn LEDs on
3960 * @hw: pointer to the HW structure
3964 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3966 DEBUGFUNC("e1000_led_on_ich8lan");
3968 if (hw->phy.type == e1000_phy_ife)
3969 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3970 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3972 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3973 return E1000_SUCCESS;
3977 * e1000_led_off_ich8lan - Turn LEDs off
3978 * @hw: pointer to the HW structure
3980 * Turn off the LEDs.
3982 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3984 DEBUGFUNC("e1000_led_off_ich8lan");
3986 if (hw->phy.type == e1000_phy_ife)
3987 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3988 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
3990 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3991 return E1000_SUCCESS;
3995 * e1000_setup_led_pchlan - Configures SW controllable LED
3996 * @hw: pointer to the HW structure
3998 * This prepares the SW controllable LED for use.
4000 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4002 DEBUGFUNC("e1000_setup_led_pchlan");
4004 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4005 (u16)hw->mac.ledctl_mode1);
4009 * e1000_cleanup_led_pchlan - Restore the default LED operation
4010 * @hw: pointer to the HW structure
4012 * Return the LED back to the default configuration.
4014 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4016 DEBUGFUNC("e1000_cleanup_led_pchlan");
4018 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4019 (u16)hw->mac.ledctl_default);
4023 * e1000_led_on_pchlan - Turn LEDs on
4024 * @hw: pointer to the HW structure
4028 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4030 u16 data = (u16)hw->mac.ledctl_mode2;
4033 DEBUGFUNC("e1000_led_on_pchlan");
4036 * If no link, then turn LED on by setting the invert bit
4037 * for each LED that's mode is "link_up" in ledctl_mode2.
4039 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4040 for (i = 0; i < 3; i++) {
4041 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4042 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4043 E1000_LEDCTL_MODE_LINK_UP)
4045 if (led & E1000_PHY_LED0_IVRT)
4046 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4048 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4052 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4056 * e1000_led_off_pchlan - Turn LEDs off
4057 * @hw: pointer to the HW structure
4059 * Turn off the LEDs.
4061 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4063 u16 data = (u16)hw->mac.ledctl_mode1;
4066 DEBUGFUNC("e1000_led_off_pchlan");
4069 * If no link, then turn LED off by clearing the invert bit
4070 * for each LED that's mode is "link_up" in ledctl_mode1.
4072 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4073 for (i = 0; i < 3; i++) {
4074 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4075 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4076 E1000_LEDCTL_MODE_LINK_UP)
4078 if (led & E1000_PHY_LED0_IVRT)
4079 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4081 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4085 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4089 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4090 * @hw: pointer to the HW structure
4092 * Read appropriate register for the config done bit for completion status
4093 * and configure the PHY through s/w for EEPROM-less parts.
4095 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4096 * config done bit, so only an error is logged and continues. If we were
4097 * to return with error, EEPROM-less silicon would not be able to be reset
4100 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4102 s32 ret_val = E1000_SUCCESS;
4106 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4108 e1000_get_cfg_done_generic(hw);
4110 /* Wait for indication from h/w that it has completed basic config */
4111 if (hw->mac.type >= e1000_ich10lan) {
4112 e1000_lan_init_done_ich8lan(hw);
4114 ret_val = e1000_get_auto_rd_done_generic(hw);
4117 * When auto config read does not complete, do not
4118 * return with an error. This can happen in situations
4119 * where there is no eeprom and prevents getting link.
4121 DEBUGOUT("Auto Read Done did not complete\n");
4122 ret_val = E1000_SUCCESS;
4126 /* Clear PHY Reset Asserted bit */
4127 status = E1000_READ_REG(hw, E1000_STATUS);
4128 if (status & E1000_STATUS_PHYRA)
4129 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4131 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4133 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4134 if (hw->mac.type <= e1000_ich9lan) {
4135 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4136 (hw->phy.type == e1000_phy_igp_3)) {
4137 e1000_phy_init_script_igp3(hw);
4140 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4141 /* Maybe we should do a basic PHY config */
4142 DEBUGOUT("EEPROM not present\n");
4143 ret_val = -E1000_ERR_CONFIG;
4151 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4152 * @hw: pointer to the HW structure
4154 * In the case of a PHY power down to save power, or to turn off link during a
4155 * driver unload, or wake on lan is not enabled, remove the link.
4157 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4159 /* If the management interface is not enabled, then power down */
4160 if (!(hw->mac.ops.check_mng_mode(hw) ||
4161 hw->phy.ops.check_reset_block(hw)))
4162 e1000_power_down_phy_copper(hw);
4168 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4169 * @hw: pointer to the HW structure
4171 * Clears hardware counters specific to the silicon family and calls
4172 * clear_hw_cntrs_generic to clear all general purpose counters.
4174 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4179 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4181 e1000_clear_hw_cntrs_base_generic(hw);
4183 E1000_READ_REG(hw, E1000_ALGNERRC);
4184 E1000_READ_REG(hw, E1000_RXERRC);
4185 E1000_READ_REG(hw, E1000_TNCRS);
4186 E1000_READ_REG(hw, E1000_CEXTERR);
4187 E1000_READ_REG(hw, E1000_TSCTC);
4188 E1000_READ_REG(hw, E1000_TSCTFC);
4190 E1000_READ_REG(hw, E1000_MGTPRC);
4191 E1000_READ_REG(hw, E1000_MGTPDC);
4192 E1000_READ_REG(hw, E1000_MGTPTC);
4194 E1000_READ_REG(hw, E1000_IAC);
4195 E1000_READ_REG(hw, E1000_ICRXOC);
4197 /* Clear PHY statistics registers */
4198 if ((hw->phy.type == e1000_phy_82578) ||
4199 (hw->phy.type == e1000_phy_82579) ||
4200 (hw->phy.type == e1000_phy_82577)) {
4201 ret_val = hw->phy.ops.acquire(hw);
4204 ret_val = hw->phy.ops.set_page(hw,
4205 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4208 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4209 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4210 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4211 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4212 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4213 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4214 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4215 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4216 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4217 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4218 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4219 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4220 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4221 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4223 hw->phy.ops.release(hw);