1 /******************************************************************************
3 Copyright (c) 2001-2009, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 * 82562G 10/100 Network Connection
37 * 82562G-2 10/100 Network Connection
38 * 82562GT 10/100 Network Connection
39 * 82562GT-2 10/100 Network Connection
40 * 82562V 10/100 Network Connection
41 * 82562V-2 10/100 Network Connection
42 * 82566DC-2 Gigabit Network Connection
43 * 82566DC Gigabit Network Connection
44 * 82566DM-2 Gigabit Network Connection
45 * 82566DM Gigabit Network Connection
46 * 82566MC Gigabit Network Connection
47 * 82566MM Gigabit Network Connection
48 * 82567LM Gigabit Network Connection
49 * 82567LF Gigabit Network Connection
50 * 82567V Gigabit Network Connection
51 * 82567LM-2 Gigabit Network Connection
52 * 82567LF-2 Gigabit Network Connection
53 * 82567V-2 Gigabit Network Connection
54 * 82567LF-3 Gigabit Network Connection
55 * 82567LM-3 Gigabit Network Connection
56 * 82567LM-4 Gigabit Network Connection
57 * 82577LM Gigabit Network Connection
58 * 82577LC Gigabit Network Connection
59 * 82578DM Gigabit Network Connection
60 * 82578DC Gigabit Network Connection
61 * 82579LM Gigabit Network Connection
62 * 82579V Gigabit Network Connection
65 #include "e1000_api.h"
67 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
79 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
80 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
81 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
83 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
85 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
86 u16 words, u16 *data);
87 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
88 u16 words, u16 *data);
89 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
90 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
91 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
93 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
94 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
95 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
96 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
97 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
98 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
99 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
100 u16 *speed, u16 *duplex);
101 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
102 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
103 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
104 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
105 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
106 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
107 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
108 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
109 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
110 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
111 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
112 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
113 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
114 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
115 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
116 u32 offset, u8 *data);
117 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
119 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
120 u32 offset, u16 *data);
121 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
122 u32 offset, u8 byte);
123 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
124 u32 offset, u8 data);
125 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
128 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
129 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
130 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
131 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
133 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
134 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
136 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
137 /* Offset 04h HSFSTS */
138 union ich8_hws_flash_status {
140 u16 flcdone :1; /* bit 0 Flash Cycle Done */
141 u16 flcerr :1; /* bit 1 Flash Cycle Error */
142 u16 dael :1; /* bit 2 Direct Access error Log */
143 u16 berasesz :2; /* bit 4:3 Sector Erase Size */
144 u16 flcinprog :1; /* bit 5 flash cycle in Progress */
145 u16 reserved1 :2; /* bit 13:6 Reserved */
146 u16 reserved2 :6; /* bit 13:6 Reserved */
147 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
148 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
153 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
154 /* Offset 06h FLCTL */
155 union ich8_hws_flash_ctrl {
156 struct ich8_hsflctl {
157 u16 flcgo :1; /* 0 Flash Cycle Go */
158 u16 flcycle :2; /* 2:1 Flash Cycle */
159 u16 reserved :5; /* 7:3 Reserved */
160 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
161 u16 flockdn :6; /* 15:10 Reserved */
166 /* ICH Flash Region Access Permissions */
167 union ich8_hws_flash_regacc {
169 u32 grra :8; /* 0:7 GbE region Read Access */
170 u32 grwa :8; /* 8:15 GbE region Write Access */
171 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
172 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
178 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
179 * @hw: pointer to the HW structure
181 * Initialize family-specific PHY parameters and function pointers.
183 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
185 struct e1000_phy_info *phy = &hw->phy;
187 s32 ret_val = E1000_SUCCESS;
189 DEBUGFUNC("e1000_init_phy_params_pchlan");
192 phy->reset_delay_us = 100;
194 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
195 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
196 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
197 phy->ops.read_reg = e1000_read_phy_reg_hv;
198 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
199 phy->ops.release = e1000_release_swflag_ich8lan;
200 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
201 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
202 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
203 phy->ops.write_reg = e1000_write_phy_reg_hv;
204 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
205 phy->ops.power_up = e1000_power_up_phy_copper;
206 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
207 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
210 * The MAC-PHY interconnect may still be in SMBus mode
211 * after Sx->S0. If the manageability engine (ME) is
212 * disabled, then toggle the LANPHYPC Value bit to force
213 * the interconnect to PCIe mode.
215 fwsm = E1000_READ_REG(hw, E1000_FWSM);
216 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) &&
217 !(hw->phy.ops.check_reset_block(hw))) {
218 ctrl = E1000_READ_REG(hw, E1000_CTRL);
219 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
220 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
221 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
223 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
224 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
228 * Gate automatic PHY configuration by hardware on
231 if (hw->mac.type == e1000_pch2lan)
232 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
236 * Reset the PHY before any acccess to it. Doing so, ensures that
237 * the PHY is in a known good state before we read/write PHY registers.
238 * The generic reset is sufficient here, because we haven't determined
241 ret_val = e1000_phy_hw_reset_generic(hw);
245 /* Ungate automatic PHY configuration on non-managed 82579 */
246 if ((hw->mac.type == e1000_pch2lan) &&
247 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
249 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
252 phy->id = e1000_phy_unknown;
253 switch (hw->mac.type) {
255 ret_val = e1000_get_phy_id(hw);
258 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
263 * In case the PHY needs to be in mdio slow mode,
264 * set slow mode and try to get the PHY id again.
266 ret_val = e1000_set_mdio_slow_mode_hv(hw);
269 ret_val = e1000_get_phy_id(hw);
274 phy->type = e1000_get_phy_type_from_id(phy->id);
277 case e1000_phy_82577:
278 case e1000_phy_82579:
279 phy->ops.check_polarity = e1000_check_polarity_82577;
280 phy->ops.force_speed_duplex =
281 e1000_phy_force_speed_duplex_82577;
282 phy->ops.get_cable_length = e1000_get_cable_length_82577;
283 phy->ops.get_info = e1000_get_phy_info_82577;
284 phy->ops.commit = e1000_phy_sw_reset_generic;
286 case e1000_phy_82578:
287 phy->ops.check_polarity = e1000_check_polarity_m88;
288 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
289 phy->ops.get_cable_length = e1000_get_cable_length_m88;
290 phy->ops.get_info = e1000_get_phy_info_m88;
293 ret_val = -E1000_ERR_PHY;
302 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
303 * @hw: pointer to the HW structure
305 * Initialize family-specific PHY parameters and function pointers.
307 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
309 struct e1000_phy_info *phy = &hw->phy;
310 s32 ret_val = E1000_SUCCESS;
313 DEBUGFUNC("e1000_init_phy_params_ich8lan");
316 phy->reset_delay_us = 100;
318 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
319 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
320 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
321 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
322 phy->ops.read_reg = e1000_read_phy_reg_igp;
323 phy->ops.release = e1000_release_swflag_ich8lan;
324 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
325 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
326 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
327 phy->ops.write_reg = e1000_write_phy_reg_igp;
328 phy->ops.power_up = e1000_power_up_phy_copper;
329 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
332 * We may need to do this twice - once for IGP and if that fails,
333 * we'll set BM func pointers and try again
335 ret_val = e1000_determine_phy_address(hw);
337 phy->ops.write_reg = e1000_write_phy_reg_bm;
338 phy->ops.read_reg = e1000_read_phy_reg_bm;
339 ret_val = e1000_determine_phy_address(hw);
341 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
347 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
350 ret_val = e1000_get_phy_id(hw);
357 case IGP03E1000_E_PHY_ID:
358 phy->type = e1000_phy_igp_3;
359 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
360 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
361 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
362 phy->ops.get_info = e1000_get_phy_info_igp;
363 phy->ops.check_polarity = e1000_check_polarity_igp;
364 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
367 case IFE_PLUS_E_PHY_ID:
369 phy->type = e1000_phy_ife;
370 phy->autoneg_mask = E1000_ALL_NOT_GIG;
371 phy->ops.get_info = e1000_get_phy_info_ife;
372 phy->ops.check_polarity = e1000_check_polarity_ife;
373 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
375 case BME1000_E_PHY_ID:
376 phy->type = e1000_phy_bm;
377 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
378 phy->ops.read_reg = e1000_read_phy_reg_bm;
379 phy->ops.write_reg = e1000_write_phy_reg_bm;
380 phy->ops.commit = e1000_phy_sw_reset_generic;
381 phy->ops.get_info = e1000_get_phy_info_m88;
382 phy->ops.check_polarity = e1000_check_polarity_m88;
383 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
386 ret_val = -E1000_ERR_PHY;
395 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
396 * @hw: pointer to the HW structure
398 * Initialize family-specific NVM parameters and function
401 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
403 struct e1000_nvm_info *nvm = &hw->nvm;
404 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
405 u32 gfpreg, sector_base_addr, sector_end_addr;
406 s32 ret_val = E1000_SUCCESS;
409 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
411 /* Can't read flash registers if the register set isn't mapped. */
412 if (!hw->flash_address) {
413 DEBUGOUT("ERROR: Flash registers not mapped\n");
414 ret_val = -E1000_ERR_CONFIG;
418 nvm->type = e1000_nvm_flash_sw;
420 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
423 * sector_X_addr is a "sector"-aligned address (4096 bytes)
424 * Add 1 to sector_end_addr since this sector is included in
427 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
428 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
430 /* flash_base_addr is byte-aligned */
431 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
434 * find total size of the NVM, then cut in half since the total
435 * size represents two separate NVM banks.
437 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
438 << FLASH_SECTOR_ADDR_SHIFT;
439 nvm->flash_bank_size /= 2;
440 /* Adjust to word count */
441 nvm->flash_bank_size /= sizeof(u16);
443 nvm->word_size = E1000_SHADOW_RAM_WORDS;
445 /* Clear shadow ram */
446 for (i = 0; i < nvm->word_size; i++) {
447 dev_spec->shadow_ram[i].modified = FALSE;
448 dev_spec->shadow_ram[i].value = 0xFFFF;
451 /* Function Pointers */
452 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
453 nvm->ops.release = e1000_release_nvm_ich8lan;
454 nvm->ops.read = e1000_read_nvm_ich8lan;
455 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
456 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
457 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
458 nvm->ops.write = e1000_write_nvm_ich8lan;
465 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
466 * @hw: pointer to the HW structure
468 * Initialize family-specific MAC parameters and function
471 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
473 struct e1000_mac_info *mac = &hw->mac;
476 DEBUGFUNC("e1000_init_mac_params_ich8lan");
478 /* Set media type function pointer */
479 hw->phy.media_type = e1000_media_type_copper;
481 /* Set mta register count */
482 mac->mta_reg_count = 32;
483 /* Set rar entry count */
484 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
485 if (mac->type == e1000_ich8lan)
486 mac->rar_entry_count--;
487 /* Set if part includes ASF firmware */
488 mac->asf_firmware_present = TRUE;
490 mac->has_fwsm = TRUE;
491 /* ARC subsystem not supported */
492 mac->arc_subsystem_valid = FALSE;
493 /* Adaptive IFS supported */
494 mac->adaptive_ifs = TRUE;
496 /* Function pointers */
498 /* bus type/speed/width */
499 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
501 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
503 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
504 /* hw initialization */
505 mac->ops.init_hw = e1000_init_hw_ich8lan;
507 mac->ops.setup_link = e1000_setup_link_ich8lan;
508 /* physical interface setup */
509 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
511 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
513 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
514 /* multicast address update */
515 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
516 /* clear hardware counters */
517 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
524 /* check management mode */
525 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
527 mac->ops.id_led_init = e1000_id_led_init_generic;
529 mac->ops.blink_led = e1000_blink_led_generic;
531 mac->ops.setup_led = e1000_setup_led_generic;
533 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
534 /* turn on/off LED */
535 mac->ops.led_on = e1000_led_on_ich8lan;
536 mac->ops.led_off = e1000_led_off_ich8lan;
539 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
540 mac->ops.rar_set = e1000_rar_set_pch2lan;
543 /* save PCH revision_id */
544 e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
545 hw->revision_id = (u8)(pci_cfg &= 0x000F);
546 /* check management mode */
547 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
549 mac->ops.id_led_init = e1000_id_led_init_pchlan;
551 mac->ops.setup_led = e1000_setup_led_pchlan;
553 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
554 /* turn on/off LED */
555 mac->ops.led_on = e1000_led_on_pchlan;
556 mac->ops.led_off = e1000_led_off_pchlan;
562 /* Enable PCS Lock-loss workaround for ICH8 */
563 if (mac->type == e1000_ich8lan)
564 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
566 /* Gate automatic PHY configuration by hardware on managed 82579 */
567 if ((mac->type == e1000_pch2lan) &&
568 (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
569 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
571 return E1000_SUCCESS;
575 * e1000_set_eee_pchlan - Enable/disable EEE support
576 * @hw: pointer to the HW structure
578 * Enable/disable EEE based on setting in dev_spec structure. The bits in
579 * the LPI Control register will remain set only if/when link is up.
581 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
583 s32 ret_val = E1000_SUCCESS;
586 DEBUGFUNC("e1000_set_eee_pchlan");
588 if (hw->phy.type != e1000_phy_82579)
591 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
595 if (hw->dev_spec.ich8lan.eee_disable)
596 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
598 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
600 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
606 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
607 * @hw: pointer to the HW structure
609 * Checks to see of the link status of the hardware has changed. If a
610 * change in link status has been detected, then we read the PHY registers
611 * to get the current speed/duplex if link exists.
613 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
615 struct e1000_mac_info *mac = &hw->mac;
619 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
622 * We only want to go out to the PHY registers to see if Auto-Neg
623 * has completed and/or if our link status has changed. The
624 * get_link_status flag is set upon receiving a Link Status
625 * Change or Rx Sequence Error interrupt.
627 if (!mac->get_link_status) {
628 ret_val = E1000_SUCCESS;
633 * First we want to see if the MII Status Register reports
634 * link. If so, then we want to get the current speed/duplex
637 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
641 if (hw->mac.type == e1000_pchlan) {
642 ret_val = e1000_k1_gig_workaround_hv(hw, link);
648 goto out; /* No link detected */
650 mac->get_link_status = FALSE;
652 if (hw->phy.type == e1000_phy_82578) {
653 ret_val = e1000_link_stall_workaround_hv(hw);
658 if (hw->mac.type == e1000_pch2lan) {
659 ret_val = e1000_k1_workaround_lv(hw);
665 * Check if there was DownShift, must be checked
666 * immediately after link-up
668 e1000_check_downshift_generic(hw);
670 /* Enable/Disable EEE after link up */
671 ret_val = e1000_set_eee_pchlan(hw);
676 * If we are forcing speed/duplex, then we simply return since
677 * we have already determined whether we have link or not.
680 ret_val = -E1000_ERR_CONFIG;
685 * Auto-Neg is enabled. Auto Speed Detection takes care
686 * of MAC speed/duplex configuration. So we only need to
687 * configure Collision Distance in the MAC.
689 e1000_config_collision_dist_generic(hw);
692 * Configure Flow Control now that Auto-Neg has completed.
693 * First, we need to restore the desired flow control
694 * settings because we may have had to re-autoneg with a
695 * different link partner.
697 ret_val = e1000_config_fc_after_link_up_generic(hw);
699 DEBUGOUT("Error configuring flow control\n");
706 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
707 * @hw: pointer to the HW structure
709 * Initialize family-specific function pointers for PHY, MAC, and NVM.
711 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
713 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
715 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
716 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
717 switch (hw->mac.type) {
721 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
725 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
733 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
734 * @hw: pointer to the HW structure
736 * Acquires the mutex for performing NVM operations.
738 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
740 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
741 return E1000_SUCCESS;
745 * e1000_release_nvm_ich8lan - Release NVM mutex
746 * @hw: pointer to the HW structure
748 * Releases the mutex used while performing NVM operations.
750 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
752 DEBUGFUNC("e1000_release_nvm_ich8lan");
757 * e1000_acquire_swflag_ich8lan - Acquire software control flag
758 * @hw: pointer to the HW structure
760 * Acquires the software control flag for performing PHY and select
763 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
765 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
766 s32 ret_val = E1000_SUCCESS;
768 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
771 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
772 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
780 DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
781 ret_val = -E1000_ERR_CONFIG;
785 timeout = SW_FLAG_TIMEOUT;
787 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
788 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
791 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
792 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
800 DEBUGOUT("Failed to acquire the semaphore.\n");
801 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
802 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
803 ret_val = -E1000_ERR_CONFIG;
812 * e1000_release_swflag_ich8lan - Release software control flag
813 * @hw: pointer to the HW structure
815 * Releases the software control flag for performing PHY and select
818 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
822 DEBUGFUNC("e1000_release_swflag_ich8lan");
824 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
825 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
826 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
832 * e1000_check_mng_mode_ich8lan - Checks management mode
833 * @hw: pointer to the HW structure
835 * This checks if the adapter has any manageability enabled.
836 * This is a function pointer entry point only called by read/write
837 * routines for the PHY and NVM parts.
839 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
843 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
845 fwsm = E1000_READ_REG(hw, E1000_FWSM);
847 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
848 ((fwsm & E1000_FWSM_MODE_MASK) ==
849 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
853 * e1000_check_mng_mode_pchlan - Checks management mode
854 * @hw: pointer to the HW structure
856 * This checks if the adapter has iAMT enabled.
857 * This is a function pointer entry point only called by read/write
858 * routines for the PHY and NVM parts.
860 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
864 DEBUGFUNC("e1000_check_mng_mode_pchlan");
866 fwsm = E1000_READ_REG(hw, E1000_FWSM);
868 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
869 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
873 * e1000_rar_set_pch2lan - Set receive address register
874 * @hw: pointer to the HW structure
875 * @addr: pointer to the receive address
876 * @index: receive address array register
878 * Sets the receive address array register at index to the address passed
879 * in by addr. For 82579, RAR[0] is the base address register that is to
880 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
881 * Use SHRA[0-3] in place of those reserved for ME.
883 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
885 u32 rar_low, rar_high;
887 DEBUGFUNC("e1000_rar_set_pch2lan");
890 * HW expects these in little endian so we reverse the byte order
891 * from network order (big endian) to little endian
893 rar_low = ((u32) addr[0] |
894 ((u32) addr[1] << 8) |
895 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
897 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
899 /* If MAC address zero, no need to set the AV bit */
900 if (rar_low || rar_high)
901 rar_high |= E1000_RAH_AV;
904 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
905 E1000_WRITE_FLUSH(hw);
906 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
907 E1000_WRITE_FLUSH(hw);
911 if (index < hw->mac.rar_entry_count) {
912 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
913 E1000_WRITE_FLUSH(hw);
914 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
915 E1000_WRITE_FLUSH(hw);
917 /* verify the register updates */
918 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
919 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
922 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
923 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
926 DEBUGOUT1("Failed to write receive address at index %d\n", index);
930 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
931 * @hw: pointer to the HW structure
933 * Checks if firmware is blocking the reset of the PHY.
934 * This is a function pointer entry point only called by
937 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
941 DEBUGFUNC("e1000_check_reset_block_ich8lan");
943 if (hw->phy.reset_disable)
944 return E1000_BLK_PHY_RESET;
946 fwsm = E1000_READ_REG(hw, E1000_FWSM);
948 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
949 : E1000_BLK_PHY_RESET;
953 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
954 * @hw: pointer to the HW structure
956 * Assumes semaphore already acquired.
959 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
962 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
963 s32 ret_val = E1000_SUCCESS;
965 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
967 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
971 phy_data &= ~HV_SMB_ADDR_MASK;
972 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
973 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
974 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
981 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
982 * @hw: pointer to the HW structure
984 * SW should configure the LCD from the NVM extended configuration region
985 * as a workaround for certain parts.
987 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
989 struct e1000_phy_info *phy = &hw->phy;
990 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
991 s32 ret_val = E1000_SUCCESS;
992 u16 word_addr, reg_data, reg_addr, phy_page = 0;
994 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
997 * Initialize the PHY from the NVM on ICH platforms. This
998 * is needed due to an issue where the NVM configuration is
999 * not properly autoloaded after power transitions.
1000 * Therefore, after each PHY reset, we will load the
1001 * configuration data out of the NVM manually.
1003 switch (hw->mac.type) {
1005 if (phy->type != e1000_phy_igp_3)
1008 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1009 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1010 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1016 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1022 ret_val = hw->phy.ops.acquire(hw);
1026 data = E1000_READ_REG(hw, E1000_FEXTNVM);
1027 if (!(data & sw_cfg_mask))
1031 * Make sure HW does not configure LCD from PHY
1032 * extended configuration before SW configuration
1034 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1035 if (!(hw->mac.type == e1000_pch2lan)) {
1036 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1040 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1041 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1042 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1046 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1047 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1049 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1050 (hw->mac.type == e1000_pchlan)) ||
1051 (hw->mac.type == e1000_pch2lan)) {
1053 * HW configures the SMBus address and LEDs when the
1054 * OEM and LCD Write Enable bits are set in the NVM.
1055 * When both NVM bits are cleared, SW will configure
1058 ret_val = e1000_write_smbus_addr(hw);
1062 data = E1000_READ_REG(hw, E1000_LEDCTL);
1063 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1069 /* Configure LCD from extended configuration region. */
1071 /* cnf_base_addr is in DWORD */
1072 word_addr = (u16)(cnf_base_addr << 1);
1074 for (i = 0; i < cnf_size; i++) {
1075 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1080 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1085 /* Save off the PHY page for future writes. */
1086 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1087 phy_page = reg_data;
1091 reg_addr &= PHY_REG_MASK;
1092 reg_addr |= phy_page;
1094 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1101 hw->phy.ops.release(hw);
1106 * e1000_k1_gig_workaround_hv - K1 Si workaround
1107 * @hw: pointer to the HW structure
1108 * @link: link up bool flag
1110 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1111 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1112 * If link is down, the function will restore the default K1 setting located
1115 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1117 s32 ret_val = E1000_SUCCESS;
/* Default K1 enable state comes from the NVM-derived device-specific data. */
1119 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1121 DEBUGFUNC("e1000_k1_gig_workaround_hv");
/* Workaround applies to PCH (82577/82578) parts only. */
1123 if (hw->mac.type != e1000_pchlan)
1126 /* Wrap the whole flow with the sw flag */
1127 ret_val = hw->phy.ops.acquire(hw);
1131 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1133 if (hw->phy.type == e1000_phy_82578) {
1134 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
/* Mask down to link-up/resolved/speed bits and test for a resolved 1G link. */
1139 status_reg &= BM_CS_STATUS_LINK_UP |
1140 BM_CS_STATUS_RESOLVED |
1141 BM_CS_STATUS_SPEED_MASK;
1143 if (status_reg == (BM_CS_STATUS_LINK_UP |
1144 BM_CS_STATUS_RESOLVED |
1145 BM_CS_STATUS_SPEED_1000))
/* Same check for the 82577 PHY using its HV status register layout. */
1149 if (hw->phy.type == e1000_phy_82577) {
1150 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1155 status_reg &= HV_M_STATUS_LINK_UP |
1156 HV_M_STATUS_AUTONEG_COMPLETE |
1157 HV_M_STATUS_SPEED_MASK;
1159 if (status_reg == (HV_M_STATUS_LINK_UP |
1160 HV_M_STATUS_AUTONEG_COMPLETE |
1161 HV_M_STATUS_SPEED_1000))
1165 /* Link stall fix for link up */
1166 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1172 /* Link stall fix for link down */
1173 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
/* Apply the chosen K1 state while still holding the PHY semaphore. */
1179 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1182 hw->phy.ops.release(hw);
1188 * e1000_configure_k1_ich8lan - Configure K1 power state
1189 * @hw: pointer to the HW structure
1190 * @enable: K1 state to configure
1192 * Configure the K1 power state based on the provided parameter.
1193 * Assumes semaphore already acquired.
1195 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1197 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1199 s32 ret_val = E1000_SUCCESS;
1205 DEBUGFUNC("e1000_configure_k1_ich8lan");
/* Read-modify-write the KMRN K1 config word to set or clear the enable bit. */
1207 ret_val = e1000_read_kmrn_reg_locked(hw,
1208 E1000_KMRNCTRLSTA_K1_CONFIG,
1214 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1216 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1218 ret_val = e1000_write_kmrn_reg_locked(hw,
1219 E1000_KMRNCTRLSTA_K1_CONFIG,
/*
 * Briefly force MAC speed with SPD_BYPS set so the new K1 state takes
 * effect, then restore the original CTRL/CTRL_EXT values.
 */
1225 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1226 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1228 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1229 reg |= E1000_CTRL_FRCSPD;
1230 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1232 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1234 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1235 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1243 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1244 * @hw: pointer to the HW structure
1245 * @d0_state: boolean if entering d0 or d3 device state
1247 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1248 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1249 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1251 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1257 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
/* Only PCH and PCH2 MACs use SW-driven OEM bit configuration. */
1259 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1262 ret_val = hw->phy.ops.acquire(hw);
/* On pre-PCH2 parts, bail out if HW is allowed to do the OEM config itself. */
1266 if (!(hw->mac.type == e1000_pch2lan)) {
1267 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1268 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1272 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1273 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1276 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1278 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1282 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/*
 * Mirror the MAC PHY_CTRL D0a bits (first pair) or non-D0a bits (second
 * pair) into the PHY OEM register; the d0_state branch selecting between
 * them sits on lines not present in this extract.
 */
1285 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1286 oem_reg |= HV_OEM_BITS_GBE_DIS;
1288 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1289 oem_reg |= HV_OEM_BITS_LPLU;
1291 if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1292 oem_reg |= HV_OEM_BITS_GBE_DIS;
1294 if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1295 oem_reg |= HV_OEM_BITS_LPLU;
1297 /* Restart auto-neg to activate the bits */
1298 if (!hw->phy.ops.check_reset_block(hw))
1299 oem_reg |= HV_OEM_BITS_RESTART_AN;
1300 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1303 hw->phy.ops.release(hw);
1310 * e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
1311 * @hw: pointer to the HW structure
1313 s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
1315 DEBUGFUNC("e1000_hv_phy_powerdown_workaround_ich8lan");
/* Only 82577 PHYs on MAC revision <= 2 need this; everything else is a no-op. */
1317 if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
1318 return E1000_SUCCESS;
1320 return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
1324 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1325 * @hw: pointer to the HW structure
1327 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1332 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
/* Read-modify-write HV_KMRN_MODE_CTRL to set the slow-MDIO bit. */
1334 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1338 data |= HV_KMRN_MDIO_SLOW;
1340 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1346 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1347 * done after every PHY reset.
1349 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1351 s32 ret_val = E1000_SUCCESS;
1354 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
/* PCH (82577/82578) only. */
1356 if (hw->mac.type != e1000_pchlan)
1359 /* Set MDIO slow mode before any other MDIO access */
1360 if (hw->phy.type == e1000_phy_82577) {
1361 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1366 /* Hanksville M Phy init for IEEE. */
/*
 * Address/data register pairs (0x10 = address, 0x11 = data) — values per
 * HW team; magic constants are intentional and must not be "cleaned up".
 */
1367 if ((hw->revision_id == 2) &&
1368 (hw->phy.type == e1000_phy_82577) &&
1369 ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1370 hw->phy.ops.write_reg(hw, 0x10, 0x8823);
1371 hw->phy.ops.write_reg(hw, 0x11, 0x0018);
1372 hw->phy.ops.write_reg(hw, 0x10, 0x8824);
1373 hw->phy.ops.write_reg(hw, 0x11, 0x0016);
1374 hw->phy.ops.write_reg(hw, 0x10, 0x8825);
1375 hw->phy.ops.write_reg(hw, 0x11, 0x001A);
1376 hw->phy.ops.write_reg(hw, 0x10, 0x888C);
1377 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1378 hw->phy.ops.write_reg(hw, 0x10, 0x888D);
1379 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1380 hw->phy.ops.write_reg(hw, 0x10, 0x888E);
1381 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1382 hw->phy.ops.write_reg(hw, 0x10, 0x8827);
1383 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1384 hw->phy.ops.write_reg(hw, 0x10, 0x8835);
1385 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1386 hw->phy.ops.write_reg(hw, 0x10, 0x8834);
1387 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1388 hw->phy.ops.write_reg(hw, 0x10, 0x8833);
1389 hw->phy.ops.write_reg(hw, 0x11, 0x0002);
1392 if (((hw->phy.type == e1000_phy_82577) &&
1393 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1394 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1395 /* Disable generation of early preamble */
1396 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1400 /* Preamble tuning for SSC */
1401 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
/* 82578-specific tuning; (1 << 6) | reg appears to be a direct page-1 register address — confirm against the PHY_REG macro. */
1406 if (hw->phy.type == e1000_phy_82578) {
1407 if (hw->revision_id < 3) {
1409 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
1415 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
1422 * Return registers to default by doing a soft reset then
1423 * writing 0x3140 to the control register.
1425 if (hw->phy.revision < 2) {
1426 e1000_phy_sw_reset_generic(hw);
1427 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1432 if ((hw->revision_id == 2) &&
1433 (hw->phy.type == e1000_phy_82577) &&
1434 ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1436 * Workaround for OEM (GbE) not operating after reset -
1437 * restart AN (twice)
1439 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1442 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
/* Reset the page select to 0 via raw MDIC access under the PHY semaphore. */
1448 ret_val = hw->phy.ops.acquire(hw);
1453 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1454 hw->phy.ops.release(hw);
1459 * Configure the K1 Si workaround during phy reset assuming there is
1460 * link so that it disables K1 if link is in 1Gbps.
1462 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1466 /* Workaround for link disconnects on a busy hub in half duplex */
1467 ret_val = hw->phy.ops.acquire(hw);
1470 ret_val = hw->phy.ops.read_reg_locked(hw,
1471 PHY_REG(BM_PORT_CTRL_PAGE, 17),
1475 ret_val = hw->phy.ops.write_reg_locked(hw,
1476 PHY_REG(BM_PORT_CTRL_PAGE, 17),
1479 hw->phy.ops.release(hw);
1485 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1486 * @hw: pointer to the HW structure
1488 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1493 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1495 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1496 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
/* Low 32 bits of the receive address split across two 16-bit PHY regs. */
1497 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1498 hw->phy.ops.write_reg(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
1499 hw->phy.ops.write_reg(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
1500 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1501 hw->phy.ops.write_reg(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
/* Only bit 15 (Address Valid) of RAH's upper half is mirrored into BM_RAR_CTRL. */
1502 hw->phy.ops.write_reg(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
/* Compute the CRC over a 6-byte Rx destination MAC address (seeds E1000_PCH_RAICC). */
1506 static u32 e1000_calc_rx_da_crc(u8 mac[])
1508 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1509 u32 i, j, mask, crc;
1511 DEBUGFUNC("e1000_calc_rx_da_crc");
/* Bitwise LSB-first (reflected) CRC-32; the crc initialization and per-byte XOR sit on lines not present in this extract. */
1514 for (i = 0; i < 6; i++) {
1516 for (j = 8; j > 0; j--) {
/* mask is all-ones when the low CRC bit is set, zero otherwise — branch-free conditional XOR with the polynomial. */
1517 mask = (crc & 1) * (-1);
1518 crc = (crc >> 1) ^ (poly & mask);
1525 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1527 * @hw: pointer to the HW structure
1528 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1530 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1532 s32 ret_val = E1000_SUCCESS;
1537 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
/* 82579 (pch2lan) only. */
1539 if (hw->mac.type != e1000_pch2lan)
1542 /* disable Rx path while enabling/disabling workaround */
1543 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1544 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1550 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1551 * SHRAL/H) and initial CRC values to the MAC
1553 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1554 u8 mac_addr[ETH_ADDR_LEN] = {0};
1555 u32 addr_high, addr_low;
/* Skip entries whose Address Valid bit is clear. */
1557 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1558 if (!(addr_high & E1000_RAH_AV))
1560 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
/* Unpack the little-endian RAL/RAH pair into a byte-wise MAC address. */
1561 mac_addr[0] = (addr_low & 0xFF);
1562 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1563 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1564 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1565 mac_addr[4] = (addr_high & 0xFF);
1566 mac_addr[5] = ((addr_high >> 8) & 0xFF);
/* Seed the per-entry Rx CRC register with the address CRC. */
1568 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1569 e1000_calc_rx_da_crc(mac_addr));
1572 /* Write Rx addresses to the PHY */
1573 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1575 /* Enable jumbo frame workaround in the MAC */
1576 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1577 mac_reg &= ~(1 << 14);
1578 mac_reg |= (7 << 15);
1579 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip Ethernet CRC on receive while the workaround is active. */
1581 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1582 mac_reg |= E1000_RCTL_SECRC;
1583 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1585 ret_val = e1000_read_kmrn_reg_generic(hw,
1586 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1590 ret_val = e1000_write_kmrn_reg_generic(hw,
1591 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1595 ret_val = e1000_read_kmrn_reg_generic(hw,
1596 E1000_KMRNCTRLSTA_HD_CTRL,
1600 data &= ~(0xF << 8);
1602 ret_val = e1000_write_kmrn_reg_generic(hw,
1603 E1000_KMRNCTRLSTA_HD_CTRL,
1608 /* Enable jumbo frame workaround in the PHY */
1609 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1610 data &= ~(0x7F << 5);
1611 data |= (0x37 << 5);
1612 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1615 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1617 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1620 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1621 data &= ~(0x3FF << 2);
1622 data |= (0x1A << 2);
1623 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1626 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
1629 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1630 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
1634 /* Write MAC register values back to h/w defaults */
1635 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1636 mac_reg &= ~(0xF << 14);
1637 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1639 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1640 mac_reg &= ~E1000_RCTL_SECRC;
1641 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1643 ret_val = e1000_read_kmrn_reg_generic(hw,
1644 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1648 ret_val = e1000_write_kmrn_reg_generic(hw,
1649 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1653 ret_val = e1000_read_kmrn_reg_generic(hw,
1654 E1000_KMRNCTRLSTA_HD_CTRL,
1658 data &= ~(0xF << 8);
1660 ret_val = e1000_write_kmrn_reg_generic(hw,
1661 E1000_KMRNCTRLSTA_HD_CTRL,
1666 /* Write PHY register values back to h/w defaults */
1667 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1668 data &= ~(0x7F << 5);
1669 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1672 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1674 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1677 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1678 data &= ~(0x3FF << 2);
1680 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1683 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1686 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1687 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
1692 /* re-enable Rx path after enabling/disabling workaround */
1693 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1700 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1701 * done after every PHY reset.
1703 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1705 s32 ret_val = E1000_SUCCESS;
1707 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
/* 82579 (pch2lan) only; other MAC types need none of this. */
1709 if (hw->mac.type != e1000_pch2lan)
1712 /* Set MDIO slow mode before any other MDIO access */
1713 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1720 * e1000_k1_gig_workaround_lv - K1 Si workaround
1721 * @hw: pointer to the HW structure
1723 * Workaround to set the K1 beacon duration for 82579 parts
1725 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1727 s32 ret_val = E1000_SUCCESS;
1731 DEBUGFUNC("e1000_k1_workaround_lv");
/* 82579 (pch2lan) only. */
1733 if (hw->mac.type != e1000_pch2lan)
1736 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1737 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Only adjust the beacon when link is up and auto-negotiation has finished. */
1741 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1742 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1743 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1744 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
/* 8 usec beacon at 1Gbps, 16 usec at lower speeds. */
1746 if (status_reg & HV_M_STATUS_SPEED_1000)
1747 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1749 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1751 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1759 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1760 * @hw: pointer to the HW structure
1761 * @gate: boolean set to TRUE to gate, FALSE to un-gate
1763 * Gate/ungate the automatic PHY configuration via hardware; perform
1764 * the configuration via software instead.
1766 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1770 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
/* 82579 (pch2lan) only. */
1772 if (hw->mac.type != e1000_pch2lan)
/* Read-modify-write the gate bit in EXTCNF_CTRL per the @gate flag. */
1775 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1778 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1780 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1782 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1787 * e1000_hv_phy_tuning_workaround_ich8lan - This is a Phy tuning work around
1788 * needed for Nahum3 + Hanksville testing, requested by HW team
1790 static s32 e1000_hv_phy_tuning_workaround_ich8lan(struct e1000_hw *hw)
1792 s32 ret_val = E1000_SUCCESS;
1794 DEBUGFUNC("e1000_hv_phy_tuning_workaround_ich8lan");
/* Disable early preamble, then preamble tuning for SSC (values from HW team). */
1796 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1800 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
/* (1 << 6) | reg appears to address page-1 registers 0x29/0x1E directly — confirm against the PHY_REG macro. */
1804 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29, 0x66C0);
1808 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E, 0xFFFF);
1815 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1816 * @hw: pointer to the HW structure
1818 * Check the appropriate indication the MAC has finished configuring the
1819 * PHY after a software reset.
1821 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1823 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1825 DEBUGFUNC("e1000_lan_init_done_ich8lan");
1827 /* Wait for basic configuration completes before proceeding */
/* Poll STATUS.LAN_INIT_DONE until it is set or the loop counter expires. */
1829 data = E1000_READ_REG(hw, E1000_STATUS);
1830 data &= E1000_STATUS_LAN_INIT_DONE;
1832 } while ((!data) && --loop);
1835 * If basic configuration is incomplete before the above loop
1836 * count reaches 0, loading the configuration from NVM will
1837 * leave the PHY in a bad state possibly resulting in no link.
1840 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1842 /* Clear the Init Done bit for the next init event */
1843 data = E1000_READ_REG(hw, E1000_STATUS);
1844 data &= ~E1000_STATUS_LAN_INIT_DONE;
1845 E1000_WRITE_REG(hw, E1000_STATUS, data);
1849 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1850 * @hw: pointer to the HW structure
1852 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1854 s32 ret_val = E1000_SUCCESS;
1857 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
/* Nothing to do while the PHY is blocked from reset by manageability. */
1859 if (hw->phy.ops.check_reset_block(hw))
1862 /* Allow time for h/w to get to quiescent state after reset */
1865 /* Perform any necessary post-reset workarounds */
/* Dispatch the MAC-type-specific workaround set (PCH vs PCH2). */
1866 switch (hw->mac.type) {
1868 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1873 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1881 if (hw->device_id == E1000_DEV_ID_ICH10_HANKSVILLE) {
1882 ret_val = e1000_hv_phy_tuning_workaround_ich8lan(hw);
1887 /* Dummy read to clear the phy wakeup bit after lcd reset */
1888 if (hw->mac.type >= e1000_pchlan)
1889 hw->phy.ops.read_reg(hw, BM_WUC, &reg);
1891 /* Configure the LCD with the extended configuration region in NVM */
1892 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1896 /* Configure the LCD with the OEM bits in NVM */
1897 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1899 /* Ungate automatic PHY configuration on non-managed 82579 */
1900 if ((hw->mac.type == e1000_pch2lan) &&
1901 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1903 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1911 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1912 * @hw: pointer to the HW structure
1915 * This is a function pointer entry point called by drivers
1916 * or other shared routines.
1918 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1920 s32 ret_val = E1000_SUCCESS;
1922 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1924 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1925 if ((hw->mac.type == e1000_pch2lan) &&
1926 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1927 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
/* Generic HW reset, then the ICH/PCH-specific post-reset sequence. */
1929 ret_val = e1000_phy_hw_reset_generic(hw);
1933 ret_val = e1000_post_phy_reset_ich8lan(hw);
1940 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1941 * @hw: pointer to the HW structure
1942 * @active: TRUE to enable LPLU, FALSE to disable
1944 * Sets the LPLU state according to the active flag. For PCH, if OEM write
1945 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
1946 * the phy speed. This function will manually set the LPLU bit and restart
1947 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1948 * since it configures the same bit.
1950 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1952 s32 ret_val = E1000_SUCCESS;
1955 DEBUGFUNC("e1000_set_lplu_state_pchlan");
/* Read-modify-write the PHY OEM bits: set/clear LPLU per @active. */
1957 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
1962 oem_reg |= HV_OEM_BITS_LPLU;
1964 oem_reg &= ~HV_OEM_BITS_LPLU;
/* Restart auto-negotiation so the new LPLU setting takes effect. */
1966 oem_reg |= HV_OEM_BITS_RESTART_AN;
1967 ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
1974 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1975 * @hw: pointer to the HW structure
1976 * @active: TRUE to enable LPLU, FALSE to disable
1978 * Sets the LPLU D0 state according to the active flag. When
1979 * activating LPLU this function also disables smart speed
1980 * and vice versa. LPLU will not be activated unless the
1981 * device autonegotiation advertisement meets standards of
1982 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1983 * This is a function pointer entry point only called by
1984 * PHY setup routines.
1986 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1988 struct e1000_phy_info *phy = &hw->phy;
1990 s32 ret_val = E1000_SUCCESS;
1993 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
/* IFE PHYs do not support D0 LPLU via this path. */
1995 if (phy->type == e1000_phy_ife)
1998 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Activation path: set the MAC D0a LPLU bit. */
2001 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2002 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
/* Remaining SmartSpeed handling applies only to IGP3 PHYs. */
2004 if (phy->type != e1000_phy_igp_3)
2008 * Call gig speed drop workaround on LPLU before accessing
2011 if (hw->mac.type == e1000_ich8lan)
2012 e1000_gig_downshift_workaround_ich8lan(hw);
2014 /* When LPLU is enabled, we should disable SmartSpeed */
2015 ret_val = phy->ops.read_reg(hw,
2016 IGP01E1000_PHY_PORT_CONFIG,
2018 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2019 ret_val = phy->ops.write_reg(hw,
2020 IGP01E1000_PHY_PORT_CONFIG,
/* Deactivation path: clear the MAC D0a LPLU bit. */
2025 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2026 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2028 if (phy->type != e1000_phy_igp_3)
2032 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2033 * during Dx states where the power conservation is most
2034 * important. During driver activity we should enable
2035 * SmartSpeed, so performance is maintained.
2037 if (phy->smart_speed == e1000_smart_speed_on) {
2038 ret_val = phy->ops.read_reg(hw,
2039 IGP01E1000_PHY_PORT_CONFIG,
2044 data |= IGP01E1000_PSCFR_SMART_SPEED;
2045 ret_val = phy->ops.write_reg(hw,
2046 IGP01E1000_PHY_PORT_CONFIG,
2050 } else if (phy->smart_speed == e1000_smart_speed_off) {
2051 ret_val = phy->ops.read_reg(hw,
2052 IGP01E1000_PHY_PORT_CONFIG,
2057 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2058 ret_val = phy->ops.write_reg(hw,
2059 IGP01E1000_PHY_PORT_CONFIG,
2071 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2072 * @hw: pointer to the HW structure
2073 * @active: TRUE to enable LPLU, FALSE to disable
2075 * Sets the LPLU D3 state according to the active flag. When
2076 * activating LPLU this function also disables smart speed
2077 * and vice versa. LPLU will not be activated unless the
2078 * device autonegotiation advertisement meets standards of
2079 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2080 * This is a function pointer entry point only called by
2081 * PHY setup routines.
2083 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2085 struct e1000_phy_info *phy = &hw->phy;
2087 s32 ret_val = E1000_SUCCESS;
2090 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2092 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Deactivation path: clear the MAC non-D0a LPLU bit. */
2095 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2096 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
/* SmartSpeed handling below applies only to IGP3 PHYs. */
2098 if (phy->type != e1000_phy_igp_3)
2102 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2103 * during Dx states where the power conservation is most
2104 * important. During driver activity we should enable
2105 * SmartSpeed, so performance is maintained.
2107 if (phy->smart_speed == e1000_smart_speed_on) {
2108 ret_val = phy->ops.read_reg(hw,
2109 IGP01E1000_PHY_PORT_CONFIG,
2114 data |= IGP01E1000_PSCFR_SMART_SPEED;
2115 ret_val = phy->ops.write_reg(hw,
2116 IGP01E1000_PHY_PORT_CONFIG,
2120 } else if (phy->smart_speed == e1000_smart_speed_off) {
2121 ret_val = phy->ops.read_reg(hw,
2122 IGP01E1000_PHY_PORT_CONFIG,
2127 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2128 ret_val = phy->ops.write_reg(hw,
2129 IGP01E1000_PHY_PORT_CONFIG,
/* Activation path: only when the advertisement meets the LPLU standard. */
2134 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2135 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2136 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2137 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2138 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2140 if (phy->type != e1000_phy_igp_3)
2144 * Call gig speed drop workaround on LPLU before accessing
2147 if (hw->mac.type == e1000_ich8lan)
2148 e1000_gig_downshift_workaround_ich8lan(hw);
2150 /* When LPLU is enabled, we should disable SmartSpeed */
2151 ret_val = phy->ops.read_reg(hw,
2152 IGP01E1000_PHY_PORT_CONFIG,
2157 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2158 ret_val = phy->ops.write_reg(hw,
2159 IGP01E1000_PHY_PORT_CONFIG,
2168 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2169 * @hw: pointer to the HW structure
2170 * @bank: pointer to the variable that returns the active bank
2172 * Reads signature byte from the NVM using the flash access registers.
2173 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2175 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2178 struct e1000_nvm_info *nvm = &hw->nvm;
/* Byte offset of bank 1 (bank size is in words, hence * sizeof(u16)). */
2179 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
/* Byte offset of the high byte of the signature word (0x13) in bank 0. */
2180 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2182 s32 ret_val = E1000_SUCCESS;
2184 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
/* Newer MACs can report the valid bank directly via EECD.SEC1VAL. */
2186 switch (hw->mac.type) {
2189 eecd = E1000_READ_REG(hw, E1000_EECD);
2190 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2191 E1000_EECD_SEC1VAL_VALID_MASK) {
2192 if (eecd & E1000_EECD_SEC1VAL)
2199 DEBUGOUT("Unable to determine valid NVM bank via EEC - "
2200 "reading flash signature\n");
2203 /* set bank to 0 in case flash read fails */
/* Fallback: read the signature byte of bank 0, then bank 1, looking for the valid-signature pattern. */
2207 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2211 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2212 E1000_ICH_NVM_SIG_VALUE) {
2218 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2223 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2224 E1000_ICH_NVM_SIG_VALUE) {
2229 DEBUGOUT("ERROR: No valid NVM bank present\n");
2230 ret_val = -E1000_ERR_NVM;
2238 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2239 * @hw: pointer to the HW structure
2240 * @offset: The offset (in bytes) of the word(s) to read.
2241 * @words: Size of data to read in words
2242 * @data: Pointer to the word(s) to read at offset.
2244 * Reads a word(s) from the NVM using the flash access registers.
2246 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2249 struct e1000_nvm_info *nvm = &hw->nvm;
2250 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2252 s32 ret_val = E1000_SUCCESS;
2256 DEBUGFUNC("e1000_read_nvm_ich8lan");
/* Bounds-check the request against the NVM word size before touching HW. */
2258 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2260 DEBUGOUT("nvm parameter(s) out of bounds\n");
2261 ret_val = -E1000_ERR_NVM;
2265 nvm->ops.acquire(hw);
/* Bank detection failure is non-fatal; fall back to bank 0. */
2267 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2268 if (ret_val != E1000_SUCCESS) {
2269 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2273 act_offset = (bank) ? nvm->flash_bank_size : 0;
2274 act_offset += offset;
2276 ret_val = E1000_SUCCESS;
/* Prefer modified shadow-RAM entries over reading the flash itself. */
2277 for (i = 0; i < words; i++) {
2278 if ((dev_spec->shadow_ram) &&
2279 (dev_spec->shadow_ram[offset+i].modified) {
2280 data[i] = dev_spec->shadow_ram[offset+i].value;
2282 ret_val = e1000_read_flash_word_ich8lan(hw,
2291 nvm->ops.release(hw);
2295 DEBUGOUT1("NVM read error: %d\n", ret_val);
2301 * e1000_flash_cycle_init_ich8lan - Initialize flash
2302 * @hw: pointer to the HW structure
2304 * This function does initial flash setup so that a new read/write/erase cycle
2307 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2309 union ich8_hws_flash_status hsfsts;
2310 s32 ret_val = -E1000_ERR_NVM;
2313 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2315 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2317 /* Check if the flash descriptor is valid */
2318 if (hsfsts.hsf_status.fldesvalid == 0) {
2319 DEBUGOUT("Flash descriptor invalid. "
2320 "SW Sequencing must be used.");
2324 /* Clear FCERR and DAEL in hw status by writing 1 */
2325 hsfsts.hsf_status.flcerr = 1;
2326 hsfsts.hsf_status.dael = 1;
2328 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2331 * Either we should have a hardware SPI cycle in progress
2332 * bit to check against, in order to start a new cycle or
2333 * FDONE bit should be changed in the hardware so that it
2334 * is 1 after hardware reset, which can then be used as an
2335 * indication whether a cycle is in progress or has been
2339 if (hsfsts.hsf_status.flcinprog == 0) {
2341 * There is no cycle running at present,
2342 * so we can start a cycle.
2343 * Begin by setting Flash Cycle Done.
2345 hsfsts.hsf_status.flcdone = 1;
2346 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2347 ret_val = E1000_SUCCESS;
2350 * Otherwise poll for sometime so the current
2351 * cycle has a chance to end before giving up.
2353 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2354 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2356 if (hsfsts.hsf_status.flcinprog == 0) {
2357 ret_val = E1000_SUCCESS;
2362 if (ret_val == E1000_SUCCESS) {
2364 * Successful in waiting for previous cycle to timeout,
2365 * now set the Flash Cycle Done.
2367 hsfsts.hsf_status.flcdone = 1;
2368 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2371 DEBUGOUT("Flash controller busy, cannot get access");
2380 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2381 * @hw: pointer to the HW structure
2382 * @timeout: maximum time to wait for completion
2384 * This function starts a flash cycle and waits for its completion.
2386 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2388 union ich8_hws_flash_ctrl hsflctl;
2389 union ich8_hws_flash_status hsfsts;
2390 s32 ret_val = -E1000_ERR_NVM;
2393 DEBUGFUNC("e1000_flash_cycle_ich8lan");
2395 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2396 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2397 hsflctl.hsf_ctrl.flcgo = 1;
2398 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2400 /* wait till FDONE bit is set to 1 */
2402 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2403 if (hsfsts.hsf_status.flcdone == 1)
2406 } while (i++ < timeout);
/* Success requires cycle-done set AND no flash-cycle error latched. */
2408 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2409 ret_val = E1000_SUCCESS;
2415 * e1000_read_flash_word_ich8lan - Read word from flash
2416 * @hw: pointer to the HW structure
2417 * @offset: offset to data location
2418 * @data: pointer to the location for storing the data
2420 * Reads the flash word at offset into data. Offset is converted
2421 * to bytes before read.
2423 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2428 DEBUGFUNC("e1000_read_flash_word_ich8lan");
2431 ret_val = -E1000_ERR_NVM;
2435 /* Must convert offset into bytes. */
/* NOTE(review): the word-to-byte offset conversion statement is not present in this extract. */
2438 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2445 * e1000_read_flash_byte_ich8lan - Read byte from flash
2446 * @hw: pointer to the HW structure
2447 * @offset: The offset of the byte to read.
2448 * @data: Pointer to a byte to store the value read.
2450 * Reads a single byte from the NVM using the flash access registers.
2452 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2455 s32 ret_val = E1000_SUCCESS;
/* One-byte read through the common path; the value arrives in a u16 temp. */
2458 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2469 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2470 * @hw: pointer to the HW structure
2471 * @offset: The offset (in bytes) of the byte or word to read.
2472 * @size: Size of data to read, 1=byte 2=word
2473 * @data: Pointer to the word to store the value read.
2475 * Reads a byte or word from the NVM using the flash access registers.
2477 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2480 union ich8_hws_flash_status hsfsts;
2481 union ich8_hws_flash_ctrl hsflctl;
2482 u32 flash_linear_addr;
2484 s32 ret_val = -E1000_ERR_NVM;
2487 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Only 1- or 2-byte reads within the linear address range are supported. */
2489 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2492 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2493 hw->nvm.flash_base_addr;
/* Retry loop: init the controller, program the cycle, then run it. */
2498 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2499 if (ret_val != E1000_SUCCESS)
2502 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2503 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2504 hsflctl.hsf_ctrl.fldbcount = size - 1;
2505 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2506 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2508 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2510 ret_val = e1000_flash_cycle_ich8lan(hw,
2511 ICH_FLASH_READ_COMMAND_TIMEOUT);
2514 * Check if FCERR is set to 1, if set to 1, clear it
2515 * and try the whole sequence a few more times, else
2516 * read in (shift in) the Flash Data0, the order is
2517 * least significant byte first msb to lsb
2519 if (ret_val == E1000_SUCCESS) {
2520 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2522 *data = (u8)(flash_data & 0x000000FF);
2524 *data = (u16)(flash_data & 0x0000FFFF);
2528 * If we've gotten here, then things are probably
2529 * completely hosed, but if the error condition is
2530 * detected, it won't hurt to give it another try...
2531 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2533 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2535 if (hsfsts.hsf_status.flcerr == 1) {
2536 /* Repeat for some time before giving up. */
2538 } else if (hsfsts.hsf_status.flcdone == 0) {
2539 DEBUGOUT("Timeout error - flash cycle "
2540 "did not complete.");
2544 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2551 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2552 * @hw: pointer to the HW structure
2553 * @offset: The offset (in bytes) of the word(s) to write.
2554 * @words: Size of data to write in words
2555 * @data: Pointer to the word(s) to write at offset.
2557 * Writes a byte or word to the NVM using the flash access registers.
/*
 * NOTE(review): listing is line-subsampled (original line numbers jump), so
 * braces, the `u16 i;` declaration, the closing of the bounds check and the
 * return path are missing.  Code left byte-identical.
 */
2559 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2562 struct e1000_nvm_info *nvm = &hw->nvm;
2563 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2564 s32 ret_val = E1000_SUCCESS;
2567 DEBUGFUNC("e1000_write_nvm_ich8lan");
2569 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2571 DEBUGOUT("nvm parameter(s) out of bounds\n");
2572 ret_val = -E1000_ERR_NVM;
2576 nvm->ops.acquire(hw);
/*
 * Words are only staged in the shadow RAM here (marked modified); the
 * actual flash commit happens later in e1000_update_nvm_checksum_ich8lan.
 */
2578 for (i = 0; i < words; i++) {
2579 dev_spec->shadow_ram[offset+i].modified = TRUE;
2580 dev_spec->shadow_ram[offset+i].value = data[i];
2583 nvm->ops.release(hw);
2590 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2591 * @hw: pointer to the HW structure
2593 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2594 * which writes the checksum to the shadow ram. The changes in the shadow
2595 * ram are then committed to the EEPROM by processing each bank at a time
2596 * checking for the modified bit and writing only the pending changes.
2597 * After a successful commit, the shadow ram is cleared and is ready for
/*
 * NOTE(review): listing is line-subsampled — many lines (braces, error
 * branches, `act_offset` arguments to the retry writes, the out/release
 * path) are absent.  Code left byte-identical; comments below annotate only
 * what the visible lines establish.
 */
2600 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2602 struct e1000_nvm_info *nvm = &hw->nvm;
2603 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2604 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2608 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2610 ret_val = e1000_update_nvm_checksum_generic(hw);
2614 if (nvm->type != e1000_nvm_flash_sw)
2617 nvm->ops.acquire(hw);
2620 * We're writing to the opposite bank so if we're on bank 1,
2621 * write to bank 0 etc. We also need to erase the segment that
2622 * is going to be written
2624 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2625 if (ret_val != E1000_SUCCESS) {
2626 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Commit strategy: erase the inactive bank, then copy each word into it. */
2631 new_bank_offset = nvm->flash_bank_size;
2632 old_bank_offset = 0;
2633 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2637 old_bank_offset = nvm->flash_bank_size;
2638 new_bank_offset = 0;
2639 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2644 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2646 * Determine whether to write the value stored
2647 * in the other NVM bank or a modified value stored
2650 if (dev_spec->shadow_ram[i].modified) {
2651 data = dev_spec->shadow_ram[i].value;
2653 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2661 * If the word is 0x13, then make sure the signature bits
2662 * (15:14) are 11b until the commit has completed.
2663 * This will allow us to write 10b which indicates the
2664 * signature is valid. We want to do this after the write
2665 * has completed so that we don't mark the segment valid
2666 * while the write is still in progress
2668 if (i == E1000_ICH_NVM_SIG_WORD)
2669 data |= E1000_ICH_NVM_SIG_MASK;
2671 /* Convert offset to bytes. */
2672 act_offset = (i + new_bank_offset) << 1;
2675 /* Write the bytes to the new bank. */
2676 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2683 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2691 * Don't bother writing the segment valid bits if sector
2692 * programming failed.
2695 DEBUGOUT("Flash commit failed.\n");
2700 * Finally validate the new segment by setting bit 15:14
2701 * to 10b in word 0x13 , this can be done without an
2702 * erase as well since these bits are 11 to start with
2703 * and we need to change bit 14 to 0b
2705 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2706 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2711 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2718 * And invalidate the previously valid segment by setting
2719 * its signature word (0x13) high_byte to 0b. This can be
2720 * done without an erase because flash erase sets all bits
2721 * to 1's. We can write 1's to 0's without an erase
2723 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2724 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2728 /* Great! Everything worked, we can now clear the cached entries. */
2729 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2730 dev_spec->shadow_ram[i].modified = FALSE;
2731 dev_spec->shadow_ram[i].value = 0xFFFF;
2735 nvm->ops.release(hw);
2738 * Reload the EEPROM, or else modifications will not appear
2739 * until after the next adapter reset.
2742 nvm->ops.reload(hw);
2748 DEBUGOUT1("NVM update error: %d\n", ret_val);
2754 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2755 * @hw: pointer to the HW structure
2757 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2758 * If the bit is 0, the EEPROM has been modified but the checksum was not
2759 * calculated, in which case we need to calculate the checksum and set bit 6.
/*
 * NOTE(review): listing is line-subsampled; the `u16 data;` declaration,
 * error-check branches after each ops call, and the return are missing.
 * Code left byte-identical.
 */
2761 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2763 s32 ret_val = E1000_SUCCESS;
2766 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2769 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2770 * needs to be fixed. This bit is an indication that the NVM
2771 * was prepared by OEM software and did not calculate the
2772 * checksum...a likely scenario.
2774 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
/* Bit 6 clear -> set it, write the word back, then recompute/update the
 * checksum before running the generic validation. */
2778 if ((data & 0x40) == 0) {
2780 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2783 ret_val = hw->nvm.ops.update(hw);
2788 ret_val = e1000_validate_nvm_checksum_generic(hw);
2795 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2796 * @hw: pointer to the HW structure
2797 * @offset: The offset (in bytes) of the byte/word to read.
2798 * @size: Size of data to read, 1=byte 2=word
2799 * @data: The byte(s) to write to the NVM.
2801 * Writes one/two bytes to the NVM using the flash access registers.
/*
 * NOTE(review): listing is line-subsampled — the do { } loop scaffolding,
 * `u32 flash_data = 0;`, `u8 count = 0;` and goto/return paths are missing.
 * Code left byte-identical.
 */
2803 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2806 union ich8_hws_flash_status hsfsts;
2807 union ich8_hws_flash_ctrl hsflctl;
2808 u32 flash_linear_addr;
2810 s32 ret_val = -E1000_ERR_NVM;
2813 DEBUGFUNC("e1000_write_ich8_data");
/* `data > size * 0xff` rejects values too wide for the requested access
 * size (e.g. >0xFF for a 1-byte write). */
2815 if (size < 1 || size > 2 || data > size * 0xff ||
2816 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2819 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2820 hw->nvm.flash_base_addr;
2825 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2826 if (ret_val != E1000_SUCCESS)
2829 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2830 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2831 hsflctl.hsf_ctrl.fldbcount = size - 1;
2832 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2833 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2835 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2838 flash_data = (u32)data & 0x00FF;
2840 flash_data = (u32)data;
2842 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2845 * check if FCERR is set to 1 , if set to 1, clear it
2846 * and try the whole sequence a few more times else done
2848 ret_val = e1000_flash_cycle_ich8lan(hw,
2849 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2850 if (ret_val == E1000_SUCCESS)
2854 * If we're here, then things are most likely
2855 * completely hosed, but if the error condition
2856 * is detected, it won't hurt to give it another
2857 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2859 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2860 if (hsfsts.hsf_status.flcerr == 1)
2861 /* Repeat for some time before giving up. */
2863 if (hsfsts.hsf_status.flcdone == 0) {
2864 DEBUGOUT("Timeout error - flash cycle "
2865 "did not complete.");
2868 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2875 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2876 * @hw: pointer to the HW structure
2877 * @offset: The index of the byte to read.
2878 * @data: The byte to write to the NVM.
2880 * Writes a single byte to the NVM using the flash access registers.
2882 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2885 u16 word = (u16)data;
2887 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2889 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2893 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2894 * @hw: pointer to the HW structure
2895 * @offset: The offset of the byte to write.
2896 * @byte: The byte to write to the NVM.
2898 * Writes a single byte to the NVM using the flash access registers.
2899 * Goes through a retry algorithm before giving up.
2901 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2902 u32 offset, u8 byte)
2905 u16 program_retries;
2907 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2909 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2910 if (ret_val == E1000_SUCCESS)
2913 for (program_retries = 0; program_retries < 100; program_retries++) {
2914 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2916 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2917 if (ret_val == E1000_SUCCESS)
2920 if (program_retries == 100) {
2921 ret_val = -E1000_ERR_NVM;
2930 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2931 * @hw: pointer to the HW structure
2932 * @bank: 0 for first bank, 1 for second bank, etc.
2934 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2935 * bank N is 4096 * N + flash_reg_addr.
/*
 * NOTE(review): listing is line-subsampled — switch case labels, `u32 count`,
 * several iteration assignments, the inner do { } opener and the out/return
 * path are missing.  Code left byte-identical.
 */
2937 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2939 struct e1000_nvm_info *nvm = &hw->nvm;
2940 union ich8_hws_flash_status hsfsts;
2941 union ich8_hws_flash_ctrl hsflctl;
2942 u32 flash_linear_addr;
2943 /* bank size is in 16bit words - adjust to bytes */
2944 u32 flash_bank_size = nvm->flash_bank_size * 2;
2945 s32 ret_val = E1000_SUCCESS;
2947 s32 j, iteration, sector_size;
2949 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2951 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2954 * Determine HW Sector size: Read BERASE bits of hw flash status
2956 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2957 * consecutive sectors. The start index for the nth Hw sector
2958 * can be calculated as = bank * 4096 + n * 256
2959 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2960 * The start index for the nth Hw sector can be calculated
2962 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2963 * (ich9 only, otherwise error condition)
2964 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2966 switch (hsfsts.hsf_status.berasesz) {
2968 /* Hw sector size 256 */
2969 sector_size = ICH_FLASH_SEG_SIZE_256;
2970 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2973 sector_size = ICH_FLASH_SEG_SIZE_4K;
2977 sector_size = ICH_FLASH_SEG_SIZE_8K;
2981 sector_size = ICH_FLASH_SEG_SIZE_64K;
2985 ret_val = -E1000_ERR_NVM;
2989 /* Start with the base address, then add the sector offset. */
2990 flash_linear_addr = hw->nvm.flash_base_addr;
2991 flash_linear_addr += (bank) ? flash_bank_size : 0;
2993 for (j = 0; j < iteration ; j++) {
2996 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3001 * Write a value 11 (block Erase) in Flash
3002 * Cycle field in hw flash control
3004 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3006 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3007 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3011 * Write the last 24 bits of an index within the
3012 * block into Flash Linear address field in Flash
3015 flash_linear_addr += (j * sector_size);
3016 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3019 ret_val = e1000_flash_cycle_ich8lan(hw,
3020 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3021 if (ret_val == E1000_SUCCESS)
3025 * Check if FCERR is set to 1. If 1,
3026 * clear it and try the whole sequence
3027 * a few more times else Done
3029 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3031 if (hsfsts.hsf_status.flcerr == 1)
3032 /* repeat for some time before giving up */
3034 else if (hsfsts.hsf_status.flcdone == 0)
3036 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3044 * e1000_valid_led_default_ich8lan - Set the default LED settings
3045 * @hw: pointer to the HW structure
3046 * @data: Pointer to the LED settings
3048 * Reads the LED default settings from the NVM to data. If the NVM LED
3049 * settings is all 0's or F's, set the LED default to a valid LED default
3052 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3056 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3058 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3060 DEBUGOUT("NVM Read Error\n");
3064 if (*data == ID_LED_RESERVED_0000 ||
3065 *data == ID_LED_RESERVED_FFFF)
3066 *data = ID_LED_DEFAULT_ICH8LAN;
3073 * e1000_id_led_init_pchlan - store LED configurations
3074 * @hw: pointer to the HW structure
3076 * PCH does not control LEDs via the LEDCTL register, rather it uses
3077 * the PHY LED configuration register.
3079 * PCH also does not have an "always on" or "always off" mode which
3080 * complicates the ID feature. Instead of using the "on" mode to indicate
3081 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3082 * use "link_up" mode. The LEDs will still ID on request if there is no
3083 * link based on logic in e1000_led_[on|off]_pchlan().
/*
 * NOTE(review): listing is line-subsampled — `s32 ret_val;`, the two switch
 * openers, `shift` assignments, default/break cases and the return are
 * missing.  Code left byte-identical.
 */
3085 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3087 struct e1000_mac_info *mac = &hw->mac;
/* "on" is emulated with link-up mode; "off" is link-up + LED invert. */
3089 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3090 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3091 u16 data, i, temp, shift;
3093 DEBUGFUNC("e1000_id_led_init_pchlan");
3095 /* Get default ID LED modes */
3096 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3100 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3101 mac->ledctl_mode1 = mac->ledctl_default;
3102 mac->ledctl_mode2 = mac->ledctl_default;
/* Each of the 4 LEDs has a 4-bit mode field in the NVM word. */
3104 for (i = 0; i < 4; i++) {
3105 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3108 case ID_LED_ON1_DEF2:
3109 case ID_LED_ON1_ON2:
3110 case ID_LED_ON1_OFF2:
3111 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3112 mac->ledctl_mode1 |= (ledctl_on << shift);
3114 case ID_LED_OFF1_DEF2:
3115 case ID_LED_OFF1_ON2:
3116 case ID_LED_OFF1_OFF2:
3117 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3118 mac->ledctl_mode1 |= (ledctl_off << shift);
3125 case ID_LED_DEF1_ON2:
3126 case ID_LED_ON1_ON2:
3127 case ID_LED_OFF1_ON2:
3128 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3129 mac->ledctl_mode2 |= (ledctl_on << shift);
3131 case ID_LED_DEF1_OFF2:
3132 case ID_LED_ON1_OFF2:
3133 case ID_LED_OFF1_OFF2:
3134 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3135 mac->ledctl_mode2 |= (ledctl_off << shift);
3148 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3149 * @hw: pointer to the HW structure
3151 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3152 * register, so the bus width is hard coded.
3154 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3156 struct e1000_bus_info *bus = &hw->bus;
3159 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3161 ret_val = e1000_get_bus_info_pcie_generic(hw);
3164 * ICH devices are "PCI Express"-ish. They have
3165 * a configuration space, but do not contain
3166 * PCI Express Capability registers, so bus width
3167 * must be hardcoded.
3169 if (bus->width == e1000_bus_width_unknown)
3170 bus->width = e1000_bus_width_pcie_x1;
3176 * e1000_reset_hw_ich8lan - Reset the hardware
3177 * @hw: pointer to the HW structure
3179 * Does a full reset of the hardware which includes a reset of the PHY and
/*
 * NOTE(review): listing is line-subsampled — locals (u16 reg, u32 ctrl/kab/
 * icr, s32 ret_val), msec delays, error-branch closings and the return are
 * missing.  Code left byte-identical.
 */
3182 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3184 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3189 DEBUGFUNC("e1000_reset_hw_ich8lan");
3192 * Prevent the PCI-E bus from sticking if there is no TLP connection
3193 * on the last TLP read/write transaction when MAC is reset.
3195 ret_val = e1000_disable_pcie_master_generic(hw);
3197 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3199 DEBUGOUT("Masking off all interrupts\n");
3200 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3203 * Disable the Transmit and Receive units. Then delay to allow
3204 * any pending transactions to complete before we hit the MAC
3205 * with the global reset.
3207 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3208 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3209 E1000_WRITE_FLUSH(hw);
3213 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3214 if (hw->mac.type == e1000_ich8lan) {
3215 /* Set Tx and Rx buffer allocation to 8k apiece. */
3216 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3217 /* Set Packet Buffer Size to 16k. */
3218 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3221 if (hw->mac.type == e1000_pchlan) {
3222 /* Save the NVM K1 bit setting*/
/* NOTE(review): "®" below is almost certainly a mis-encoded "&reg" —
 * the read target should be the local reg variable; confirm against the
 * pristine source. */
3223 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®);
3227 if (reg & E1000_NVM_K1_ENABLE)
3228 dev_spec->nvm_k1_enabled = TRUE;
3230 dev_spec->nvm_k1_enabled = FALSE;
3233 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3235 if (!hw->phy.ops.check_reset_block(hw)) {
3237 * Full-chip reset requires MAC and PHY reset at the same
3238 * time to make sure the interface between MAC and the
3239 * external PHY is reset.
3241 ctrl |= E1000_CTRL_PHY_RST;
3244 * Gate automatic PHY configuration by hardware on
3247 if ((hw->mac.type == e1000_pch2lan) &&
3248 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3249 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3251 ret_val = e1000_acquire_swflag_ich8lan(hw);
3252 DEBUGOUT("Issuing a global reset to ich8lan\n");
3253 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3257 e1000_release_swflag_ich8lan(hw);
3259 if (ctrl & E1000_CTRL_PHY_RST) {
3260 ret_val = hw->phy.ops.get_cfg_done(hw);
3264 ret_val = e1000_post_phy_reset_ich8lan(hw);
3270 * For PCH, this write will make sure that any noise
3271 * will be detected as a CRC error and be dropped rather than show up
3272 * as a bad packet to the DMA engine.
3274 if (hw->mac.type == e1000_pchlan)
3275 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Clear any pending interrupt events by masking and reading ICR. */
3277 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3278 icr = E1000_READ_REG(hw, E1000_ICR);
3280 kab = E1000_READ_REG(hw, E1000_KABGTXD);
3281 kab |= E1000_KABGTXD_BGSQLBIAS;
3282 E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3289 * e1000_init_hw_ich8lan - Initialize the hardware
3290 * @hw: pointer to the HW structure
3292 * Prepares the hardware for transmit and receive by doing the following:
3293 * - initialize hardware bits
3294 * - initialize LED identification
3295 * - setup receive address registers
3296 * - setup flow control
3297 * - setup transmit descriptors
3298 * - clear statistics
/*
 * NOTE(review): listing is line-subsampled — `s32 ret_val;`, `u16 i;`,
 * several branch closings and the return are missing.  Code left
 * byte-identical.
 */
3300 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3302 struct e1000_mac_info *mac = &hw->mac;
3303 u32 ctrl_ext, txdctl, snoop;
3307 DEBUGFUNC("e1000_init_hw_ich8lan");
3309 e1000_initialize_hw_bits_ich8lan(hw);
3311 /* Initialize identification LED */
3312 ret_val = mac->ops.id_led_init(hw);
3314 DEBUGOUT("Error initializing identification LED\n");
3315 /* This is not fatal and we should not stop init due to this */
3317 /* Setup the receive address. */
3318 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3320 /* Zero out the Multicast HASH table */
3321 DEBUGOUT("Zeroing the MTA\n");
3322 for (i = 0; i < mac->mta_reg_count; i++)
3323 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3326 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3327 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
3328 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3330 if (hw->phy.type == e1000_phy_82578) {
3331 hw->phy.ops.read_reg(hw, BM_WUC, &i);
3332 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3337 /* Setup link and flow control */
3338 ret_val = mac->ops.setup_link(hw);
3340 /* Set the transmit descriptor write-back policy for both queues */
3341 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3342 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3343 E1000_TXDCTL_FULL_TX_DESC_WB;
3344 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3345 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3346 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3347 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3348 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3349 E1000_TXDCTL_FULL_TX_DESC_WB;
3350 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3351 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3352 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3355 * ICH8 has opposite polarity of no_snoop bits.
3356 * By default, we should use snoop behavior.
3358 if (mac->type == e1000_ich8lan)
3359 snoop = PCIE_ICH8_SNOOP_ALL;
3361 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3362 e1000_set_pcie_no_snoop_generic(hw, snoop);
3364 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3365 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3366 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3369 * Clear all of the statistics registers (clear on read). It is
3370 * important that we do this after we have tried to establish link
3371 * because the symbol error count will increment wildly if there
3374 e1000_clear_hw_cntrs_ich8lan(hw);
3379 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3380 * @hw: pointer to the HW structure
3382 * Sets/Clears required hardware bits necessary for correctly setting up the
3383 * hardware for transmit and receive.
/*
 * NOTE(review): listing is line-subsampled — `u32 reg;`, some bit
 * assignments (e.g. the TXDCTL(0)/(1) count bits, the TARC(1) MULR branch
 * body) and the closing brace are missing.  Code left byte-identical.
 */
3385 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3389 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3391 /* Extended Device Control */
3392 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3394 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3395 if (hw->mac.type >= e1000_pchlan)
3396 reg |= E1000_CTRL_EXT_PHYPDEN;
3397 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3399 /* Transmit Descriptor Control 0 */
3400 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3402 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3404 /* Transmit Descriptor Control 1 */
3405 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3407 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3409 /* Transmit Arbitration Control 0 */
3410 reg = E1000_READ_REG(hw, E1000_TARC(0));
3411 if (hw->mac.type == e1000_ich8lan)
3412 reg |= (1 << 28) | (1 << 29);
3413 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3414 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3416 /* Transmit Arbitration Control 1 */
3417 reg = E1000_READ_REG(hw, E1000_TARC(1));
3418 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3422 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3423 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3426 if (hw->mac.type == e1000_ich8lan) {
3427 reg = E1000_READ_REG(hw, E1000_STATUS);
3429 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3433 * work-around descriptor data corruption issue during nfs v2 udp
3434 * traffic, just disable the nfs filtering capability
3436 reg = E1000_READ_REG(hw, E1000_RFCTL);
3437 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3438 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3444 * e1000_setup_link_ich8lan - Setup flow control and link settings
3445 * @hw: pointer to the HW structure
3447 * Determines which flow control settings to use, then configures flow
3448 * control. Calls the appropriate media-specific link configuration
3449 * function. Assuming the adapter has a valid link partner, a valid link
3450 * should be established. Assumes the hardware has previously been reset
3451 * and the transmitter and receiver are not enabled.
/*
 * NOTE(review): listing is line-subsampled — braces, the early-return after
 * check_reset_block, the write_reg argument continuation and the out/return
 * path are missing.  Code left byte-identical.
 */
3453 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3455 s32 ret_val = E1000_SUCCESS;
3457 DEBUGFUNC("e1000_setup_link_ich8lan");
/* If PHY resets are blocked (e.g. by manageability FW), skip link setup. */
3459 if (hw->phy.ops.check_reset_block(hw))
3463 * ICH parts do not have a word in the NVM to determine
3464 * the default flow control setting, so we explicitly
3467 if (hw->fc.requested_mode == e1000_fc_default)
3468 hw->fc.requested_mode = e1000_fc_full;
3471 * Save off the requested flow control mode for use later. Depending
3472 * on the link partner's capabilities, we may or may not use this mode.
3474 hw->fc.current_mode = hw->fc.requested_mode;
3476 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3477 hw->fc.current_mode);
3479 /* Continue to configure the copper link. */
3480 ret_val = hw->mac.ops.setup_physical_interface(hw);
3484 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3485 if ((hw->phy.type == e1000_phy_82578) ||
3486 (hw->phy.type == e1000_phy_82579) ||
3487 (hw->phy.type == e1000_phy_82577)) {
3488 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3490 ret_val = hw->phy.ops.write_reg(hw,
3491 PHY_REG(BM_PORT_CTRL_PAGE, 27),
3497 ret_val = e1000_set_fc_watermarks_generic(hw);
3504 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3505 * @hw: pointer to the HW structure
3507 * Configures the kumeran interface to the PHY to wait the appropriate time
3508 * when polling the PHY, then call the generic setup_copper_link to finish
3509 * configuring the copper link.
/*
 * NOTE(review): listing is line-subsampled — locals (u32 ctrl, u16 reg_data,
 * s32 ret_val), kumeran register value arguments, switch case labels
 * (including the IFE PHY case label) and error branches are missing.  Code
 * left byte-identical.
 */
3511 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3517 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Force link up in the MAC while clearing forced speed/duplex. */
3519 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3520 ctrl |= E1000_CTRL_SLU;
3521 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3522 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3525 * Set the mac to wait the maximum time between each iteration
3526 * and increase the max iterations when polling the phy;
3527 * this fixes erroneous timeouts at 10Mbps.
3529 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3533 ret_val = e1000_read_kmrn_reg_generic(hw,
3534 E1000_KMRNCTRLSTA_INBAND_PARAM,
3539 ret_val = e1000_write_kmrn_reg_generic(hw,
3540 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* Dispatch PHY-specific setup by PHY type. */
3545 switch (hw->phy.type) {
3546 case e1000_phy_igp_3:
3547 ret_val = e1000_copper_link_setup_igp(hw);
3552 case e1000_phy_82578:
3553 ret_val = e1000_copper_link_setup_m88(hw);
3557 case e1000_phy_82577:
3558 case e1000_phy_82579:
3559 ret_val = e1000_copper_link_setup_82577(hw);
3564 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3569 reg_data &= ~IFE_PMC_AUTO_MDIX;
3571 switch (hw->phy.mdix) {
3573 reg_data &= ~IFE_PMC_FORCE_MDIX;
3576 reg_data |= IFE_PMC_FORCE_MDIX;
3580 reg_data |= IFE_PMC_AUTO_MDIX;
3583 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3591 ret_val = e1000_setup_copper_link_generic(hw);
3598 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3599 * @hw: pointer to the HW structure
3600 * @speed: pointer to store current link speed
3601 * @duplex: pointer to store the current link duplex
3603 * Calls the generic get_speed_and_duplex to retrieve the current link
3604 * information and then calls the Kumeran lock loss workaround for links at
3607 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3612 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3614 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3618 if ((hw->mac.type == e1000_ich8lan) &&
3619 (hw->phy.type == e1000_phy_igp_3) &&
3620 (*speed == SPEED_1000)) {
3621 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3629 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3630 * @hw: pointer to the HW structure
3632 * Work-around for 82566 Kumeran PCS lock loss:
3633 * On link status change (i.e. PCI reset, speed change) and link is up and
3635 * 0) if workaround is optionally disabled do nothing
3636 * 1) wait 1ms for Kumeran link to come up
3637 * 2) check Kumeran Diagnostic register PCS lock loss bit
3638 * 3) if not set the link is locked (all is good), otherwise...
3640 * 5) repeat up to 10 times
3641 * Note: this is only called for IGP3 copper when speed is 1gb.
/*
 * NOTE(review): listing is line-subsampled — locals (u32 phy_ctrl, u16 i/
 * data, bool link), braces, error checks after the reads, delays and the
 * out/return path are missing.  Code left byte-identical.
 */
3643 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3645 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3647 s32 ret_val = E1000_SUCCESS;
3651 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
/* The workaround can be disabled per-device via dev_spec. */
3653 if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
3657 * Make sure link is up before proceeding. If not just return.
3658 * Attempting this while link is negotiating fouled up link
3661 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3663 ret_val = E1000_SUCCESS;
3667 for (i = 0; i < 10; i++) {
3668 /* read once to clear */
3669 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3672 /* and again to get new status */
3673 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3677 /* check for PCS lock */
3678 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3679 ret_val = E1000_SUCCESS;
3683 /* Issue PHY reset */
3684 hw->phy.ops.reset(hw);
3687 /* Disable GigE link negotiation */
3688 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3689 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3690 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3691 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3694 * Call gig speed drop workaround on Gig disable before accessing
3697 e1000_gig_downshift_workaround_ich8lan(hw);
3699 /* unable to acquire PCS lock */
3700 ret_val = -E1000_ERR_PHY;
3707 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3708 * @hw: pointer to the HW structure
3709 * @state: boolean value used to set the current Kumeran workaround state
3711 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
3712 * /disabled - FALSE).
3714 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3717 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3719 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3721 if (hw->mac.type != e1000_ich8lan) {
3722 DEBUGOUT("Workaround applies to ICH8 only.\n");
3726 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3732 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3733 * @hw: pointer to the HW structure
3735 * Workaround for 82566 power-down on D3 entry:
3736 * 1) disable gigabit link
3737 * 2) write VR power-down enable
3739 * Continue if successful, else issue LCD reset and repeat
/*
 * NOTE(review): listing is line-subsampled — locals (u32 reg, u16 data,
 * u8 retry), braces, the retry-loop opener, delays and the out path are
 * missing.  Code left byte-identical.
 */
3741 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3747 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3749 if (hw->phy.type != e1000_phy_igp_3)
3752 /* Try the workaround twice (if needed) */
/* Disable gigabit in both D0a and non-D0a power states. */
3755 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3756 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3757 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3758 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3761 * Call gig speed drop workaround on Gig disable before
3762 * accessing any PHY registers
3764 if (hw->mac.type == e1000_ich8lan)
3765 e1000_gig_downshift_workaround_ich8lan(hw);
3767 /* Write VR power-down enable */
3768 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3769 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3770 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3771 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3773 /* Read it back and test */
3774 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3775 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3776 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3779 /* Issue PHY reset and repeat at most one more time */
3780 reg = E1000_READ_REG(hw, E1000_CTRL);
3781 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3790 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3791 * @hw: pointer to the HW structure
3793 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3794 * LPLU, Gig disable, MDIC PHY reset):
3795 * 1) Set Kumeran Near-end loopback
3796 * 2) Clear Kumeran Near-end loopback
3797 * Should only be called for ICH8[m] devices with IGP_3 Phy.
3799 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3801 s32 ret_val = E1000_SUCCESS;
3804 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3806 if ((hw->mac.type != e1000_ich8lan) ||
3807 (hw->phy.type != e1000_phy_igp_3))
3810 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3814 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3815 ret_val = e1000_write_kmrn_reg_generic(hw,
3816 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3820 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3821 ret_val = e1000_write_kmrn_reg_generic(hw,
3822 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3829 * e1000_disable_gig_wol_ich8lan - disable gig during WoL
3830 * @hw: pointer to the HW structure
3832 * During S0 to Sx transition, it is possible the link remains at gig
3833 * instead of negotiating to a lower speed. Before going to Sx, set
3834 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3837 * Should only be called for applicable parts.
3839 void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3844 DEBUGFUNC("e1000_disable_gig_wol_ich8lan");
3846 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3847 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3848 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3850 if (hw->mac.type >= e1000_pchlan) {
3851 e1000_oem_bits_config_ich8lan(hw, FALSE);
3852 ret_val = hw->phy.ops.acquire(hw);
3855 e1000_write_smbus_addr(hw);
3856 hw->phy.ops.release(hw);
3863 * e1000_cleanup_led_ich8lan - Restore the default LED operation
3864 * @hw: pointer to the HW structure
3866 * Return the LED back to the default configuration.
3868 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3870 DEBUGFUNC("e1000_cleanup_led_ich8lan");
3872 if (hw->phy.type == e1000_phy_ife)
3873 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3876 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3877 return E1000_SUCCESS;
3881 * e1000_led_on_ich8lan - Turn LEDs on
3882 * @hw: pointer to the HW structure
3886 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3888 DEBUGFUNC("e1000_led_on_ich8lan");
3890 if (hw->phy.type == e1000_phy_ife)
3891 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3892 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3894 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3895 return E1000_SUCCESS;
3899 * e1000_led_off_ich8lan - Turn LEDs off
3900 * @hw: pointer to the HW structure
3902 * Turn off the LEDs.
3904 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3906 DEBUGFUNC("e1000_led_off_ich8lan");
3908 if (hw->phy.type == e1000_phy_ife)
3909 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3910 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
3912 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3913 return E1000_SUCCESS;
3917 * e1000_setup_led_pchlan - Configures SW controllable LED
3918 * @hw: pointer to the HW structure
3920 * This prepares the SW controllable LED for use.
3922 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3924 DEBUGFUNC("e1000_setup_led_pchlan");
3926 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3927 (u16)hw->mac.ledctl_mode1);
3931 * e1000_cleanup_led_pchlan - Restore the default LED operation
3932 * @hw: pointer to the HW structure
3934 * Return the LED back to the default configuration.
3936 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3938 DEBUGFUNC("e1000_cleanup_led_pchlan");
3940 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3941 (u16)hw->mac.ledctl_default);
3945 * e1000_led_on_pchlan - Turn LEDs on
3946 * @hw: pointer to the HW structure
3950 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3952 u16 data = (u16)hw->mac.ledctl_mode2;
3955 DEBUGFUNC("e1000_led_on_pchlan");
3958 * If no link, then turn LED on by setting the invert bit
3959 * for each LED that's mode is "link_up" in ledctl_mode2.
3961 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3962 for (i = 0; i < 3; i++) {
3963 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3964 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3965 E1000_LEDCTL_MODE_LINK_UP)
3967 if (led & E1000_PHY_LED0_IVRT)
3968 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3970 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3974 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3978 * e1000_led_off_pchlan - Turn LEDs off
3979 * @hw: pointer to the HW structure
3981 * Turn off the LEDs.
3983 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3985 u16 data = (u16)hw->mac.ledctl_mode1;
3988 DEBUGFUNC("e1000_led_off_pchlan");
3991 * If no link, then turn LED off by clearing the invert bit
3992 * for each LED that's mode is "link_up" in ledctl_mode1.
3994 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3995 for (i = 0; i < 3; i++) {
3996 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3997 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3998 E1000_LEDCTL_MODE_LINK_UP)
4000 if (led & E1000_PHY_LED0_IVRT)
4001 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4003 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4007 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4011 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4012 * @hw: pointer to the HW structure
4014 * Read appropriate register for the config done bit for completion status
4015 * and configure the PHY through s/w for EEPROM-less parts.
4017 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4018 * config done bit, so only an error is logged and continues. If we were
4019 * to return with error, EEPROM-less silicon would not be able to be reset
4022 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4024 s32 ret_val = E1000_SUCCESS;
4028 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4030 e1000_get_cfg_done_generic(hw);
4032 /* Wait for indication from h/w that it has completed basic config */
4033 if (hw->mac.type >= e1000_ich10lan) {
4034 e1000_lan_init_done_ich8lan(hw);
4036 ret_val = e1000_get_auto_rd_done_generic(hw);
4039 * When auto config read does not complete, do not
4040 * return with an error. This can happen in situations
4041 * where there is no eeprom and prevents getting link.
4043 DEBUGOUT("Auto Read Done did not complete\n");
4044 ret_val = E1000_SUCCESS;
4048 /* Clear PHY Reset Asserted bit */
4049 status = E1000_READ_REG(hw, E1000_STATUS);
4050 if (status & E1000_STATUS_PHYRA)
4051 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4053 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4055 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4056 if (hw->mac.type <= e1000_ich9lan) {
4057 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4058 (hw->phy.type == e1000_phy_igp_3)) {
4059 e1000_phy_init_script_igp3(hw);
4062 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4063 /* Maybe we should do a basic PHY config */
4064 DEBUGOUT("EEPROM not present\n");
4065 ret_val = -E1000_ERR_CONFIG;
4073 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4074 * @hw: pointer to the HW structure
4076 * In the case of a PHY power down to save power, or to turn off link during a
4077 * driver unload, or wake on lan is not enabled, remove the link.
4079 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4081 /* If the management interface is not enabled, then power down */
4082 if (!(hw->mac.ops.check_mng_mode(hw) ||
4083 hw->phy.ops.check_reset_block(hw)))
4084 e1000_power_down_phy_copper(hw);
4090 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4091 * @hw: pointer to the HW structure
4093 * Clears hardware counters specific to the silicon family and calls
4094 * clear_hw_cntrs_generic to clear all general purpose counters.
4096 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4100 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4102 e1000_clear_hw_cntrs_base_generic(hw);
4104 E1000_READ_REG(hw, E1000_ALGNERRC);
4105 E1000_READ_REG(hw, E1000_RXERRC);
4106 E1000_READ_REG(hw, E1000_TNCRS);
4107 E1000_READ_REG(hw, E1000_CEXTERR);
4108 E1000_READ_REG(hw, E1000_TSCTC);
4109 E1000_READ_REG(hw, E1000_TSCTFC);
4111 E1000_READ_REG(hw, E1000_MGTPRC);
4112 E1000_READ_REG(hw, E1000_MGTPDC);
4113 E1000_READ_REG(hw, E1000_MGTPTC);
4115 E1000_READ_REG(hw, E1000_IAC);
4116 E1000_READ_REG(hw, E1000_ICRXOC);
4118 /* Clear PHY statistics registers */
4119 if ((hw->phy.type == e1000_phy_82578) ||
4120 (hw->phy.type == e1000_phy_82579) ||
4121 (hw->phy.type == e1000_phy_82577)) {
4122 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
4123 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
4124 hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
4125 hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
4126 hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
4127 hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
4128 hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
4129 hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
4130 hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
4131 hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
4132 hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
4133 hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
4134 hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
4135 hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);