ig_hal/em/emx: Add I219 (Skylake) support
[dragonfly.git] / sys / dev / netif / ig_hal / e1000_ich8lan.c
CommitLineData
9c80d176
SZ
1/******************************************************************************
2
4765c386 3 Copyright (c) 2001-2014, Intel Corporation
9c80d176
SZ
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
4be59a01 33/*$FreeBSD:$*/
9c80d176 34
379ebbe7 35/* 82562G 10/100 Network Connection
9c80d176
SZ
36 * 82562G-2 10/100 Network Connection
37 * 82562GT 10/100 Network Connection
38 * 82562GT-2 10/100 Network Connection
39 * 82562V 10/100 Network Connection
40 * 82562V-2 10/100 Network Connection
41 * 82566DC-2 Gigabit Network Connection
42 * 82566DC Gigabit Network Connection
43 * 82566DM-2 Gigabit Network Connection
44 * 82566DM Gigabit Network Connection
45 * 82566MC Gigabit Network Connection
46 * 82566MM Gigabit Network Connection
47 * 82567LM Gigabit Network Connection
48 * 82567LF Gigabit Network Connection
49 * 82567V Gigabit Network Connection
50 * 82567LM-2 Gigabit Network Connection
51 * 82567LF-2 Gigabit Network Connection
52 * 82567V-2 Gigabit Network Connection
53 * 82567LF-3 Gigabit Network Connection
54 * 82567LM-3 Gigabit Network Connection
55 * 82567LM-4 Gigabit Network Connection
6a5a645e
SZ
56 * 82577LM Gigabit Network Connection
57 * 82577LC Gigabit Network Connection
58 * 82578DM Gigabit Network Connection
59 * 82578DC Gigabit Network Connection
60 * 82579LM Gigabit Network Connection
61 * 82579V Gigabit Network Connection
4765c386
MN
62 * Ethernet Connection I217-LM
63 * Ethernet Connection I217-V
64 * Ethernet Connection I218-V
65 * Ethernet Connection I218-LM
66 * Ethernet Connection (2) I218-LM
67 * Ethernet Connection (2) I218-V
68 * Ethernet Connection (3) I218-LM
69 * Ethernet Connection (3) I218-V
9c80d176
SZ
70 */
71
72#include "e1000_api.h"
73
9c80d176
SZ
74static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
6a5a645e
SZ
76static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
9c80d176 78static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
6a5a645e 79static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
4765c386
MN
80static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
6d5e2922 83static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
4be59a01
SZ
84 u8 *mc_addr_list,
85 u32 mc_addr_count);
9c80d176 86static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
9c80d176 87static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
6a5a645e 88static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
9c80d176 89static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
4be59a01 90 bool active);
9c80d176 91static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
4be59a01 92 bool active);
9c80d176 93static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
4be59a01 94 u16 words, u16 *data);
524ce499
SZ
95static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset,
96 u16 words, u16 *data);
9c80d176 97static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
4be59a01 98 u16 words, u16 *data);
9c80d176
SZ
99static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
524ce499 101static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
9c80d176 102static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
4be59a01 103 u16 *data);
6a5a645e 104static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
9c80d176
SZ
105static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
108static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
109static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
379ebbe7 110static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
9c80d176 111static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
4be59a01 112 u16 *speed, u16 *duplex);
9c80d176
SZ
113static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
115static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
6a5a645e
SZ
116static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
118static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
120static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
9c80d176
SZ
121static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
9c80d176
SZ
123static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
4be59a01 126 u32 offset, u8 *data);
9c80d176 127static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4be59a01 128 u8 size, u16 *data);
524ce499
SZ
129static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
130 u32 *data);
9c80d176 131static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
4be59a01 132 u32 offset, u16 *data);
524ce499
SZ
133static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
134 u32 offset, u32 *data);
9c80d176 135static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4be59a01 136 u32 offset, u8 byte);
524ce499
SZ
137static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
138 u32 offset, u32 dword);
9c80d176
SZ
139static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
140static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
6a5a645e 141static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
6a5a645e
SZ
142static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
143static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
144static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
379ebbe7 145static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
9c80d176
SZ
146
147/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
148/* Offset 04h HSFSTS */
/* Hardware Sequencing Flash Status register (HSFSTS, offset 04h) overlay.
 * Access as bit-fields via hsf_status or as a raw 16-bit value via regval.
 */
union ich8_hws_flash_status {
    struct ich8_hsfsts {
        u16 flcdone:1; /* bit 0 Flash Cycle Done */
        u16 flcerr:1; /* bit 1 Flash Cycle Error */
        u16 dael:1; /* bit 2 Direct Access error Log */
        u16 berasesz:2; /* bit 4:3 Sector Erase Size */
        u16 flcinprog:1; /* bit 5 flash cycle in Progress */
        u16 reserved1:2; /* bit 7:6 Reserved */
        u16 reserved2:6; /* bit 13:8 Reserved */
        u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
        u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
    } hsf_status;
    u16 regval;
};
163
164/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
165/* Offset 06h FLCTL */
/* Hardware Sequencing Flash Control register (HSFCTL, offset 06h) overlay.
 * Access as bit-fields via hsf_ctrl or as a raw 16-bit value via regval.
 */
union ich8_hws_flash_ctrl {
    struct ich8_hsflctl {
        u16 flcgo:1; /* 0 Flash Cycle Go */
        u16 flcycle:2; /* 2:1 Flash Cycle */
        u16 reserved:5; /* 7:3 Reserved */
        u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
        u16 flockdn:6; /* 15:10 Reserved */
        /* NOTE(review): the field above is named flockdn but the comment
         * says Reserved; this mirrors the upstream Intel shared code —
         * confirm against the ICH datasheet before relying on it.
         */
    } hsf_ctrl;
    u16 regval;
};
176
177/* ICH Flash Region Access Permissions */
/* Flash Region Access Permissions register overlay. */
union ich8_hws_flash_regacc {
    struct ich8_flracc {
        u32 grra:8; /* 0:7 GbE region Read Access */
        u32 grwa:8; /* 8:15 GbE region Write Access */
        u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
        u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
    } hsf_flregacc;
    /* NOTE(review): the bit-fields above total 32 bits but regval is u16
     * (as in the upstream Intel shared code), so only the low 16 bits are
     * reachable through regval — confirm this is intentional.
     */
    u16 regval;
};
187
379ebbe7
SZ
188/**
189 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
190 * @hw: pointer to the HW structure
191 *
192 * Test access to the PHY registers by reading the PHY ID registers. If
193 * the PHY ID is already known (e.g. resume path) compare it with known ID,
194 * otherwise assume the read PHY ID is correct if it is valid.
195 *
196 * Assumes the sw/fw/hw semaphore is already acquired.
197 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
    u16 phy_reg = 0;
    u32 phy_id = 0;
    s32 ret_val = 0;
    u16 retry_count;
    u32 mac_reg = 0;

    /* Try up to twice to assemble a 32-bit PHY id from PHY_ID1/PHY_ID2.
     * 0xFFFF reads indicate the PHY did not respond on the MDIO bus.
     */
    for (retry_count = 0; retry_count < 2; retry_count++) {
        ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
        if (ret_val || (phy_reg == 0xFFFF))
            continue;
        phy_id = (u32)(phy_reg << 16);

        ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
        if (ret_val || (phy_reg == 0xFFFF)) {
            phy_id = 0;
            continue;
        }
        phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
        break;
    }

    /* If an id is already cached (e.g. resume path), it must match what
     * we just read; otherwise adopt the freshly read id if it is valid.
     */
    if (hw->phy.id) {
        if (hw->phy.id == phy_id)
            goto out;
    } else if (phy_id) {
        hw->phy.id = phy_id;
        hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
        goto out;
    }

    /* In case the PHY needs to be in mdio slow mode,
     * set slow mode and try to get the PHY id again.
     * (Pre-LPT parts only; the semaphore is dropped around the retry
     * because the slow-mode helpers acquire it themselves.)
     */
    if (hw->mac.type < e1000_pch_lpt) {
        hw->phy.ops.release(hw);
        ret_val = e1000_set_mdio_slow_mode_hv(hw);
        if (!ret_val)
            ret_val = e1000_get_phy_id(hw);
        hw->phy.ops.acquire(hw);
    }

    if (ret_val)
        return FALSE;
out:
    if (hw->mac.type == e1000_pch_lpt ||
        hw->mac.type == e1000_pch_spt) {
        /* Unforce SMBus mode in PHY */
        hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
        hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

        /* Unforce SMBus mode in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
    }

    return TRUE;
}
259
260/**
261 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
262 * @hw: pointer to the HW structure
263 *
264 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
265 * used to reset the PHY to a quiescent state when necessary.
266 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
    u32 mac_reg;

    DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

    /* Set Phy Config Counter to 50msec */
    mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
    mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
    mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
    E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

    /* Toggle LANPHYPC Value bit: assert the override with the value bit
     * low, flush, hold 10us, then drop the override. The flush after each
     * write ensures the pin actually changes before the delay starts.
     */
    mac_reg = E1000_READ_REG(hw, E1000_CTRL);
    mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
    mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
    E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
    E1000_WRITE_FLUSH(hw);
    usec_delay(10);
    mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
    E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
    E1000_WRITE_FLUSH(hw);

    if (hw->mac.type < e1000_pch_lpt) {
        /* Older parts: fixed settle time. */
        msec_delay(50);
    } else {
        /* LPT and newer: poll for LAN-connected-device indication,
         * up to 20 * 5ms, then allow an extra 30ms to settle.
         */
        u16 count = 20;

        do {
            msec_delay(5);
        } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
                   E1000_CTRL_EXT_LPCD) && count--);

        msec_delay(30);
    }
}
303
304/**
305 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
306 * @hw: pointer to the HW structure
307 *
308 * Workarounds/flow necessary for PHY initialization during driver load
309 * and resume paths.
310 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
    u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
    s32 ret_val;

    DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

    /* Gate automatic PHY configuration by hardware on managed and
     * non-managed 82579 and newer adapters.
     */
    e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

    /* It is not possible to be certain of the current state of ULP
     * so forcibly disable it.
     */
    hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
    e1000_disable_ulp_lpt_lp(hw, TRUE);

    ret_val = hw->phy.ops.acquire(hw);
    if (ret_val) {
        DEBUGOUT("Failed to initialize PHY flow\n");
        goto out;
    }

    /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
     * inaccessible and resetting the PHY is not blocked, toggle the
     * LANPHYPC Value bit to force the interconnect to PCIe mode.
     *
     * The cases below intentionally fall through: each newer family
     * tries its cheapest recovery first and then falls into the shared
     * LANPHYPC-toggle path.
     */
    switch (hw->mac.type) {
    case e1000_pch_lpt:
    case e1000_pch_spt:
        if (e1000_phy_is_accessible_pchlan(hw))
            break;

        /* Before toggling LANPHYPC, see if PHY is accessible by
         * forcing MAC to SMBus mode first.
         */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

        /* Wait 50 milliseconds for MAC to finish any retries
         * that it might be trying to perform from previous
         * attempts to acknowledge any phy read requests.
         */
        msec_delay(50);

        /* fall-through */
    case e1000_pch2lan:
        if (e1000_phy_is_accessible_pchlan(hw))
            break;

        /* fall-through */
    case e1000_pchlan:
        /* On pchlan, a valid firmware image means ME owns the PHY;
         * do not toggle in that case.
         */
        if ((hw->mac.type == e1000_pchlan) &&
            (fwsm & E1000_ICH_FWSM_FW_VALID))
            break;

        if (hw->phy.ops.check_reset_block(hw)) {
            DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
            ret_val = -E1000_ERR_PHY;
            break;
        }

        /* Toggle LANPHYPC Value bit */
        e1000_toggle_lanphypc_pch_lpt(hw);
        if (hw->mac.type >= e1000_pch_lpt) {
            if (e1000_phy_is_accessible_pchlan(hw))
                break;

            /* Toggling LANPHYPC brings the PHY out of SMBus mode
             * so ensure that the MAC is also out of SMBus mode
             */
            mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
            mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
            E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

            if (e1000_phy_is_accessible_pchlan(hw))
                break;

            ret_val = -E1000_ERR_PHY;
        }
        break;
    default:
        break;
    }

    hw->phy.ops.release(hw);
    if (!ret_val) {

        /* Check to see if able to reset PHY.  Print error if not */
        if (hw->phy.ops.check_reset_block(hw)) {
            ERROR_REPORT("Reset blocked by ME\n");
            goto out;
        }

        /* Reset the PHY before any access to it.  Doing so, ensures
         * that the PHY is in a known good state before we read/write
         * PHY registers.  The generic reset is sufficient here,
         * because we haven't determined the PHY type yet.
         */
        ret_val = e1000_phy_hw_reset_generic(hw);
        if (ret_val)
            goto out;

        /* On a successful reset, possibly need to wait for the PHY
         * to quiesce to an accessible state before returning control
         * to the calling function.  If the PHY does not quiesce, then
         * return E1000E_BLK_PHY_RESET, as this is the condition that
         * the PHY is in.
         */
        ret_val = hw->phy.ops.check_reset_block(hw);
        if (ret_val)
            ERROR_REPORT("ME blocked access to PHY after reset\n");
    }

out:
    /* Ungate automatic PHY configuration on non-managed 82579 */
    if ((hw->mac.type == e1000_pch2lan) &&
        !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
        msec_delay(10);
        e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
    }

    return ret_val;
}
437
6a5a645e
SZ
438/**
439 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
440 * @hw: pointer to the HW structure
441 *
442 * Initialize family-specific PHY parameters and function pointers.
443 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
    struct e1000_phy_info *phy = &hw->phy;
    s32 ret_val;

    DEBUGFUNC("e1000_init_phy_params_pchlan");

    phy->addr = 1;
    phy->reset_delay_us = 100;

    /* PCH-family PHY operation table (HV register accessors). */
    phy->ops.acquire = e1000_acquire_swflag_ich8lan;
    phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
    phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
    phy->ops.set_page = e1000_set_page_igp;
    phy->ops.read_reg = e1000_read_phy_reg_hv;
    phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
    phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
    phy->ops.release = e1000_release_swflag_ich8lan;
    phy->ops.reset = e1000_phy_hw_reset_ich8lan;
    phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
    phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
    phy->ops.write_reg = e1000_write_phy_reg_hv;
    phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
    phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
    phy->ops.power_up = e1000_power_up_phy_copper;
    phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
    phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

    phy->id = e1000_phy_unknown;

    /* The workarounds may already have discovered the PHY id. */
    ret_val = e1000_init_phy_workarounds_pchlan(hw);
    if (ret_val)
        return ret_val;

    if (phy->id == e1000_phy_unknown)
        switch (hw->mac.type) {
        default:
            ret_val = e1000_get_phy_id(hw);
            if (ret_val)
                return ret_val;
            if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
                break;
            /* fall-through */
        case e1000_pch2lan:
        case e1000_pch_lpt:
        case e1000_pch_spt:
            /* In case the PHY needs to be in mdio slow mode,
             * set slow mode and try to get the PHY id again.
             */
            ret_val = e1000_set_mdio_slow_mode_hv(hw);
            if (ret_val)
                return ret_val;
            ret_val = e1000_get_phy_id(hw);
            if (ret_val)
                return ret_val;
            break;
        }
    phy->type = e1000_get_phy_type_from_id(phy->id);

    /* Bind type-specific ops; unknown PHY types are an error. */
    switch (phy->type) {
    case e1000_phy_82577:
    case e1000_phy_82579:
    case e1000_phy_i217:
        phy->ops.check_polarity = e1000_check_polarity_82577;
        phy->ops.force_speed_duplex =
            e1000_phy_force_speed_duplex_82577;
        phy->ops.get_cable_length = e1000_get_cable_length_82577;
        phy->ops.get_info = e1000_get_phy_info_82577;
        phy->ops.commit = e1000_phy_sw_reset_generic;
        break;
    case e1000_phy_82578:
        phy->ops.check_polarity = e1000_check_polarity_m88;
        phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
        phy->ops.get_cable_length = e1000_get_cable_length_m88;
        phy->ops.get_info = e1000_get_phy_info_m88;
        break;
    default:
        ret_val = -E1000_ERR_PHY;
        break;
    }

    return ret_val;
}
527
9c80d176
SZ
528/**
529 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
530 * @hw: pointer to the HW structure
531 *
532 * Initialize family-specific PHY parameters and function pointers.
533 **/
534static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
535{
536 struct e1000_phy_info *phy = &hw->phy;
4be59a01 537 s32 ret_val;
9c80d176
SZ
538 u16 i = 0;
539
540 DEBUGFUNC("e1000_init_phy_params_ich8lan");
541
4be59a01
SZ
542 phy->addr = 1;
543 phy->reset_delay_us = 100;
544
545 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
546 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
547 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
548 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
549 phy->ops.read_reg = e1000_read_phy_reg_igp;
550 phy->ops.release = e1000_release_swflag_ich8lan;
551 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
552 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
553 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
554 phy->ops.write_reg = e1000_write_phy_reg_igp;
555 phy->ops.power_up = e1000_power_up_phy_copper;
556 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
9c80d176 557
379ebbe7 558 /* We may need to do this twice - once for IGP and if that fails,
9c80d176
SZ
559 * we'll set BM func pointers and try again
560 */
561 ret_val = e1000_determine_phy_address(hw);
562 if (ret_val) {
563 phy->ops.write_reg = e1000_write_phy_reg_bm;
564 phy->ops.read_reg = e1000_read_phy_reg_bm;
565 ret_val = e1000_determine_phy_address(hw);
566 if (ret_val) {
6a5a645e 567 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
4be59a01 568 return ret_val;
9c80d176
SZ
569 }
570 }
571
572 phy->id = 0;
573 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
574 (i++ < 100)) {
575 msec_delay(1);
576 ret_val = e1000_get_phy_id(hw);
577 if (ret_val)
4be59a01 578 return ret_val;
9c80d176
SZ
579 }
580
581 /* Verify phy id */
582 switch (phy->id) {
583 case IGP03E1000_E_PHY_ID:
584 phy->type = e1000_phy_igp_3;
585 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
6a5a645e
SZ
586 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
587 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
588 phy->ops.get_info = e1000_get_phy_info_igp;
589 phy->ops.check_polarity = e1000_check_polarity_igp;
590 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
9c80d176
SZ
591 break;
592 case IFE_E_PHY_ID:
593 case IFE_PLUS_E_PHY_ID:
594 case IFE_C_E_PHY_ID:
595 phy->type = e1000_phy_ife;
596 phy->autoneg_mask = E1000_ALL_NOT_GIG;
6a5a645e
SZ
597 phy->ops.get_info = e1000_get_phy_info_ife;
598 phy->ops.check_polarity = e1000_check_polarity_ife;
599 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
9c80d176
SZ
600 break;
601 case BME1000_E_PHY_ID:
602 phy->type = e1000_phy_bm;
603 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
604 phy->ops.read_reg = e1000_read_phy_reg_bm;
605 phy->ops.write_reg = e1000_write_phy_reg_bm;
606 phy->ops.commit = e1000_phy_sw_reset_generic;
6a5a645e
SZ
607 phy->ops.get_info = e1000_get_phy_info_m88;
608 phy->ops.check_polarity = e1000_check_polarity_m88;
609 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
9c80d176
SZ
610 break;
611 default:
4be59a01
SZ
612 return -E1000_ERR_PHY;
613 break;
9c80d176
SZ
614 }
615
4be59a01 616 return E1000_SUCCESS;
9c80d176
SZ
617}
618
619/**
620 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
621 * @hw: pointer to the HW structure
622 *
623 * Initialize family-specific NVM parameters and function
624 * pointers.
625 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
    struct e1000_nvm_info *nvm = &hw->nvm;
    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
    u32 gfpreg, sector_base_addr, sector_end_addr;
    u16 i;
    u32 nvm_size;

    DEBUGFUNC("e1000_init_nvm_params_ich8lan");

    /* Can't read flash registers if the register set isn't mapped. */
    nvm->type = e1000_nvm_flash_sw;

    /* XXX turn flash_address into flash_reg_off or something more appropriate */
#define E1000_FLASH_BASE_ADDR 0xE000 /* offset of NVM access regs */
#define NVM_SIZE_MULTIPLIER 4096

    if (hw->mac.type == e1000_pch_spt) {
        /*
         * In SPT the flash is in the GbE flash region of the
         * main hw map. GFPREG does not exist. Take NVM size from
         * the STRAP register.
         */
        nvm->flash_base_addr = 0;
        /* STRAP bits 5:1 encode (size/4KB - 1). */
        nvm_size = (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
            * NVM_SIZE_MULTIPLIER;
        /* Two banks share the flash; each bank gets half. */
        nvm->flash_bank_size = nvm_size / 2;
        /* Adjust to word count */
        nvm->flash_bank_size /= sizeof(u16);
        /* Set the base address for flash register access */
        hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
    } else {
        if (!hw->flash_address) {
            DEBUGOUT("ERROR: Flash registers not mapped\n");
            return -E1000_ERR_CONFIG;
        }

        gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

        /* sector_X_addr is a "sector"-aligned address (4096 bytes)
         * Add 1 to sector_end_addr since this sector is included in
         * the overall size.
         */
        sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
        sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

        /* flash_base_addr is byte-aligned */
        nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

        /* find total size of the NVM, then cut in half since the total
         * size represents two separate NVM banks.
         */
        nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
            << FLASH_SECTOR_ADDR_SHIFT);
        nvm->flash_bank_size /= 2;
        /* Adjust to word count */
        nvm->flash_bank_size /= sizeof(u16);
    }

    nvm->word_size = E1000_SHADOW_RAM_WORDS;

    /* Clear shadow ram */
    for (i = 0; i < nvm->word_size; i++) {
        dev_spec->shadow_ram[i].modified = FALSE;
        dev_spec->shadow_ram[i].value = 0xFFFF;
    }

    /* Function Pointers */
    nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
    nvm->ops.release = e1000_release_nvm_ich8lan;
    /* SPT uses 32-bit flash accesses and its own checksum update. */
    if (hw->mac.type == e1000_pch_spt) {
        nvm->ops.read = e1000_read_nvm_spt;
        nvm->ops.update = e1000_update_nvm_checksum_spt;
    } else {
        nvm->ops.read = e1000_read_nvm_ich8lan;
        nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
    }
    nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
    nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
    nvm->ops.write = e1000_write_nvm_ich8lan;

    return E1000_SUCCESS;
}
709
710/**
711 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
712 * @hw: pointer to the HW structure
713 *
714 * Initialize family-specific MAC parameters and function
715 * pointers.
716 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
    struct e1000_mac_info *mac = &hw->mac;
    u16 pci_cfg;

    DEBUGFUNC("e1000_init_mac_params_ich8lan");

    /* Set media type function pointer */
    hw->phy.media_type = e1000_media_type_copper;

    /* Set mta register count */
    mac->mta_reg_count = 32;
    /* Set rar entry count */
    mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
    if (mac->type == e1000_ich8lan)
        mac->rar_entry_count--;
    /* Set if part includes ASF firmware */
    mac->asf_firmware_present = TRUE;
    /* FWSM register */
    mac->has_fwsm = TRUE;
    /* ARC subsystem not supported */
    mac->arc_subsystem_valid = FALSE;
    /* Adaptive IFS supported */
    mac->adaptive_ifs = TRUE;

    /* Function pointers */

    /* bus type/speed/width */
    mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
    /* function id */
    mac->ops.set_lan_id = e1000_set_lan_id_single_port;
    /* reset */
    mac->ops.reset_hw = e1000_reset_hw_ich8lan;
    /* hw initialization */
    mac->ops.init_hw = e1000_init_hw_ich8lan;
    /* link setup */
    mac->ops.setup_link = e1000_setup_link_ich8lan;
    /* physical interface setup */
    mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
    /* check for link */
    mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
    /* link info */
    mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
    /* multicast address update */
    mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
    /* clear hardware counters */
    mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

    /* LED and other operations */
    switch (mac->type) {
    case e1000_ich8lan:
    case e1000_ich9lan:
    case e1000_ich10lan:
        /* check management mode */
        mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
        /* ID LED init */
        mac->ops.id_led_init = e1000_id_led_init_generic;
        /* blink LED */
        mac->ops.blink_led = e1000_blink_led_generic;
        /* setup LED */
        mac->ops.setup_led = e1000_setup_led_generic;
        /* cleanup LED */
        mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
        /* turn on/off LED */
        mac->ops.led_on = e1000_led_on_ich8lan;
        mac->ops.led_off = e1000_led_off_ich8lan;
        break;
    case e1000_pch2lan:
        mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
        mac->ops.rar_set = e1000_rar_set_pch2lan;
        /* fall-through */
    case e1000_pch_lpt:
    case e1000_pch_spt:
        /* multicast address update for pch2 */
        mac->ops.update_mc_addr_list =
            e1000_update_mc_addr_list_pch2lan;
        /* fall-through */
    case e1000_pchlan:
        /* save PCH revision_id */
        e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
        hw->revision_id = (u8)(pci_cfg &= 0x000F);
        /* check management mode */
        mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
        /* ID LED init */
        mac->ops.id_led_init = e1000_id_led_init_pchlan;
        /* setup LED */
        mac->ops.setup_led = e1000_setup_led_pchlan;
        /* cleanup LED */
        mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
        /* turn on/off LED */
        mac->ops.led_on = e1000_led_on_pchlan;
        mac->ops.led_off = e1000_led_off_pchlan;
        break;
    default:
        break;
    }

    /* LPT/SPT override RAR table size and copper-link/OBFF handling. */
    if (mac->type == e1000_pch_lpt ||
        mac->type == e1000_pch_spt) {
        mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
        mac->ops.rar_set = e1000_rar_set_pch_lpt;
        mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
        mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
    }

    /* Enable PCS Lock-loss workaround for ICH8 */
    if (mac->type == e1000_ich8lan)
        e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

    return E1000_SUCCESS;
}
827
379ebbe7
SZ
828/**
829 * __e1000_access_emi_reg_locked - Read/write EMI register
830 * @hw: pointer to the HW structure
831 * @addr: EMI address to program
832 * @data: pointer to value to read/write from/to the EMI address
833 * @read: boolean flag to indicate read or write
834 *
835 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
836 **/
837static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
838 u16 *data, bool read)
839{
840 s32 ret_val;
841
842 DEBUGFUNC("__e1000_access_emi_reg_locked");
843
844 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
845 if (ret_val)
846 return ret_val;
847
848 if (read)
849 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
850 data);
851 else
852 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
853 *data);
854
855 return ret_val;
856}
857
858/**
859 * e1000_read_emi_reg_locked - Read Extended Management Interface register
860 * @hw: pointer to the HW structure
861 * @addr: EMI address to program
862 * @data: value to be read from the EMI address
863 *
864 * Assumes the SW/FW/HW Semaphore is already acquired.
865 **/
866s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
867{
868 DEBUGFUNC("e1000_read_emi_reg_locked");
869
870 return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
871}
872
873/**
874 * e1000_write_emi_reg_locked - Write Extended Management Interface register
875 * @hw: pointer to the HW structure
876 * @addr: EMI address to program
877 * @data: value to be written to the EMI address
878 *
879 * Assumes the SW/FW/HW Semaphore is already acquired.
880 **/
4765c386 881s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
379ebbe7
SZ
882{
883 DEBUGFUNC("e1000_read_emi_reg_locked");
884
885 return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
886}
887
6a5a645e
SZ
/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	DEBUGFUNC("e1000_set_eee_pchlan");

	/* Pick the EMI register addresses for this PHY generation; PHYs
	 * without EEE registers (any other type) are a silent no-op.
	 */
	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		return E1000_SUCCESS;
	}

	/* All PHY/EMI accesses below require the semaphore. */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			/* NOTE(review): return value of this read is not
			 * checked; on failure 'data' may be stale — confirm
			 * intentional (matches upstream Intel shared code).
			 */
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		/* Keep the 100M PLL running so LPI works on 82579. */
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	/* Commit the (possibly cleared) per-speed enable bits. */
	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}
991
/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gpbs mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	u32 status = E1000_READ_REG(hw, E1000_STATUS);
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		/* 1Gbps path: temporarily disable K1, request the PLL clock,
		 * then restore the original K1 configuration.
		 */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       &reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg &
						~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		/* Let the K1 disable settle before touching FEXTNVM6. */
		usec_delay(10);

		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		/* Restore the saved K1 configuration. */
		ret_val =
		    e1000_write_kmrn_reg_locked(hw,
						E1000_KMRNCTRLSTA_K1_CONFIG,
						reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		/* Inband Tx timeout tuning only applies to 10Mbps (any
		 * duplex) and 100Half; skip it on link-down and 100Full.
		 */
		if (!link || ((status & E1000_STATUS_SPEED_100) &&
			      (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	return ret_val;
}
6a5a645e 1080
379ebbe7
SZ
1081static u64 e1000_ltr2ns(u16 ltr)
1082{
1083 u32 value, scale;
1084
1085 /* Determine the latency in nsec based on the LTR value & scale */
1086 value = ltr & E1000_LTRV_VALUE_MASK;
1087 scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1088
1089 return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
1090}
1091
/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 *  message to the PMC.
 *
 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 *  high-water mark.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	/* Request bits (snoop + no-snoop) are only set when link is up. */
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */
	s32 obff_hwm = 0;

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;	/* latency (ns) */
		s64 value;
		u32 rxa;

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;

		/* Convert nanoseconds into the (value, scale) encoding. */
		value = lat_ns;
		while (value > E1000_LTRV_VALUE_MASK) {
			scale++;
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		/* Clamp the device's requirement to the platform maximum. */
		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);
		}

		/* Translate the latency back into an OBFF high-water mark
		 * in Rx-buffer KB.
		 */
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (s32)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;
		}
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	reg |= obff_hwm;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode. This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
}
1211
1212/**
1213 * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1214 * @hw: pointer to the HW structure
1215 * @itr: interrupt throttling rate
1216 *
1217 * Configure OBFF with the updated interrupt rate.
1218 **/
1219static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1220{
1221 u32 svcr;
1222 s32 timer;
1223
1224 DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1225
1226 /* Convert ITR value into microseconds for OBFF timer */
1227 timer = itr & E1000_ITR_MASK;
1228 timer = (timer * E1000_ITR_MULT) / 1000;
1229
1230 if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1231 DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1232 return -E1000_ERR_CONFIG;
1233 }
1234
1235 svcr = E1000_READ_REG(hw, E1000_SVCR);
1236 svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1237 svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1238 E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1239
1240 return E1000_SUCCESS;
6a5a645e
SZ
1241}
1242
4765c386
MN
/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  When link is down, configure ULP mode to significantly reduce the power
 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 *  ME firmware to start the ULP configuration.  If not on an ME enabled
 *  system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg;

	/* ULP applies only to LPT-LP (I218) parts; skip on older MACs,
	 * on the non-LP I217/I218 device IDs listed here, and when ULP
	 * is already on.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(E1000_READ_REG(hw, E1000_FEXT) &
			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			if (i++ == 100)
				break;

			msec_delay(50);
		}
		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
			  (E1000_READ_REG(hw, E1000_FEXT) &
			   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
			  i * 50);
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		/* Sx entry: keep ULP sticky and, when link-change wake is
		 * armed in WUFC, let the host wake from ULP.
		 */
		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}
1349
/**
 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @force: boolean indicating whether or not to force disabling ULP
 *
 *  Un-configure ULP mode when link is up, the system is transitioned from
 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 *  system, poll for an indication from ME that ULP has been un-configured.
 *  If not on an ME enabled system, un-configure the ULP mode by software.
 *
 *  During nominal operation, this function is called when link is acquired
 *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
 *  the driver or during Sx->S0 transitions, this is called with force=TRUE
 *  to forcibly disable ULP.
 */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = E1000_SUCCESS;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	/* Mirror of the enable-side device filter; no-op if ULP is
	 * already off.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		/* Poll up to 100msec for ME to clear ULP_CFG_DONE */
		while (E1000_READ_REG(hw, E1000_FWSM) &
		       E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 10) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			msec_delay(10);
		}
		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		msec_delay(50);

		/* Retry the PHY read now that SMBus mode is forced. */
		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		hw->phy.ops.reset(hw);
		msec_delay(50);
	}
out:
	if (ret_val)
		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}
1492
6a5a645e
SZ
/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see of the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 *
 *  Also applies a series of MAC-type and device-id specific hardware
 *  workarounds (K1, TIPG/Rx-latency, beacon duration, LTR/OBFF, K1-off)
 *  on the way through.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;
	u16 phy_reg;

	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive resulting in many collisions.  To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if (((hw->mac.type == e1000_pch2lan) ||
	     (hw->mac.type == e1000_pch_lpt) ||
	     (hw->mac.type == e1000_pch_spt)) && link) {
		u32 reg;
		reg = E1000_READ_REG(hw, E1000_STATUS);
		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
			u16 emi_addr;

			/* 10Mbps half-duplex: widen the IPG ... */
			reg = E1000_READ_REG(hw, E1000_TIPG);
			reg &= ~E1000_TIPG_IPGT_MASK;
			reg |= 0xFF;
			E1000_WRITE_REG(hw, E1000_TIPG, reg);

			/* Reduce Rx latency in analog PHY */
			ret_val = hw->phy.ops.acquire(hw);
			if (ret_val)
				return ret_val;

			if (hw->mac.type == e1000_pch2lan)
				emi_addr = I82579_RX_CONFIG;
			else
				emi_addr = I217_RX_CONFIG;
			ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);

			hw->phy.ops.release(hw);

			if (ret_val)
				return ret_val;
		} else if (hw->mac.type == e1000_pch_spt &&
		    (reg & E1000_STATUS_FD) &&
		    (reg & E1000_STATUS_SPEED_MASK) == E1000_STATUS_SPEED_1000) {
			/* SPT at 1000/full: program a short IPG instead.
			 * NOTE(review): 'reg' still holds the STATUS value
			 * here (TIPG is not re-read before the
			 * read-modify-write), so STATUS bits outside the
			 * IPGT field are written into TIPG — confirm against
			 * the upstream Intel shared code.
			 */
			reg &= ~E1000_TIPG_IPGT_MASK;
			reg |= 0x0C;
			E1000_WRITE_REG(hw, E1000_TIPG, reg);

			ret_val = hw->phy.ops.acquire(hw);
			if (ret_val)
				return ret_val;

			ret_val = e1000_write_emi_reg_locked(hw, I217_RX_CONFIG, 1);

			hw->phy.ops.release(hw);

			if (ret_val)
				return ret_val;
		}

		/* NOTE(review): purpose unclear from this code — at
		 * 1000/full on SPT it raises the PHY pointer gap field of
		 * PHY_REG(776, 20) to a minimum of 0x18; verify against
		 * Intel SPT errata/shared-code history.
		 */
		reg = E1000_READ_REG(hw, E1000_STATUS);
		if (hw->mac.type == e1000_pch_spt &&
		    (reg & E1000_STATUS_FD) &&
		    (reg & E1000_STATUS_SPEED_MASK) == E1000_STATUS_SPEED_1000) {
			u16 data;
			u16 ptr_gap;

			ret_val = hw->phy.ops.acquire(hw);
			if (ret_val)
				return ret_val;
			hw->phy.ops.read_reg_locked(hw, PHY_REG(776, 20), &data);
			ptr_gap = (data & (0x3FF << 2)) >> 2;
			if (ptr_gap < 0x18) {
				data &= ~(0x3FF << 2);
				data |= (0x18 << 2);
				hw->phy.ops.write_reg_locked(hw,
							     PHY_REG(776, 20),
							     data);
			}
			hw->phy.ops.release(hw);

			if (ret_val)
				return ret_val;
		}
	}

	/* I217 Packet Loss issue:
	 * ensure that FEXTNVM4 Beacon Duration is set correctly
	 * on power up.
	 * Set the Beacon Duration for I217 to 8 usec
	 */
	if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
		u32 mac_reg;

		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
	}

	/* Work-around I218 hang issue */
	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}

	if (hw->mac.type == e1000_pch_lpt ||
	    hw->mac.type == e1000_pch_spt) {
		/* Set platform power management values for
		 * Latency Tolerance Reporting (LTR)
		 * Optimized Buffer Flush/Fill (OBFF)
		 */
		ret_val = e1000_platform_pm_pch_lpt(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	/* FEXTNVM6 K1-off workaround: mirror the PCIEANACFG K1-off bit
	 * into FEXTNVM6 on SPT.
	 */
	if (hw->mac.type == e1000_pch_spt) {
		u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

		if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
			fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
		else
			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
	}

	if (!link)
		return E1000_SUCCESS; /* No link detected */

	mac->get_link_status = FALSE;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-thru */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
		    E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000_check_downshift_generic(hw);

	/* Enable/Disable EEE after link up */
	if (hw->phy.type > e1000_phy_82579) {
		ret_val = e1000_set_eee_pchlan(hw);
		if (ret_val)
			return ret_val;
	}

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");

	return ret_val;
}
1733
9c80d176
SZ
1734/**
1735 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1736 * @hw: pointer to the HW structure
1737 *
1738 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1739 **/
1740void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1741{
1742 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1743
1744 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1745 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
6a5a645e
SZ
1746 switch (hw->mac.type) {
1747 case e1000_ich8lan:
1748 case e1000_ich9lan:
1749 case e1000_ich10lan:
1750 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1751 break;
1752 case e1000_pchlan:
1753 case e1000_pch2lan:
379ebbe7 1754 case e1000_pch_lpt:
524ce499 1755 case e1000_pch_spt:
6a5a645e
SZ
1756 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1757 break;
1758 default:
1759 break;
1760 }
1761}
1762
1763/**
1764 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1765 * @hw: pointer to the HW structure
1766 *
1767 * Acquires the mutex for performing NVM operations.
1768 **/
1769static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1770{
1771 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1772 return E1000_SUCCESS;
1773}
1774
/**
 *  e1000_release_nvm_ich8lan - Release NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Releases the mutex used while performing NVM operations.  Nothing to
 *  undo since acquisition is a no-op on this family.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_ich8lan");
}
1786
/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select
 *  MAC CSR accesses.
 *
 *  Two phases: first wait (up to PHY_CFG_TIMEOUT ms) for any current
 *  software owner to drop the flag, then set it and wait (up to
 *  SW_FLAG_TIMEOUT ms) for the hardware to reflect the grant.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_acquire_swflag_ich8lan");

	/* Phase 1: wait for the flag to be free. */
	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	timeout = SW_FLAG_TIMEOUT;

	/* Phase 2: request the flag and wait for the grant to stick. */
	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		/* FW or HW owns the semaphore; back out our request. */
		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}
1842
1843/**
1844 * e1000_release_swflag_ich8lan - Release software control flag
1845 * @hw: pointer to the HW structure
1846 *
6a5a645e
SZ
1847 * Releases the software control flag for performing PHY and select
1848 * MAC CSR accesses.
9c80d176
SZ
1849 **/
1850static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1851{
1852 u32 extcnf_ctrl;
1853
1854 DEBUGFUNC("e1000_release_swflag_ich8lan");
1855
1856 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
6d5e2922
SZ
1857
1858 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1859 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1860 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1861 } else {
1862 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1863 }
9c80d176
SZ
1864 return;
1865}
1866
1867/**
1868 * e1000_check_mng_mode_ich8lan - Checks management mode
1869 * @hw: pointer to the HW structure
1870 *
6a5a645e 1871 * This checks if the adapter has any manageability enabled.
9c80d176
SZ
1872 * This is a function pointer entry point only called by read/write
1873 * routines for the PHY and NVM parts.
1874 **/
1875static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1876{
1877 u32 fwsm;
1878
1879 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1880
1881 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1882
6a5a645e
SZ
1883 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1884 ((fwsm & E1000_FWSM_MODE_MASK) ==
1885 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
9c80d176
SZ
1886}
1887
1888/**
6a5a645e 1889 * e1000_check_mng_mode_pchlan - Checks management mode
9c80d176
SZ
1890 * @hw: pointer to the HW structure
1891 *
6a5a645e
SZ
1892 * This checks if the adapter has iAMT enabled.
1893 * This is a function pointer entry point only called by read/write
1894 * routines for the PHY and NVM parts.
9c80d176 1895 **/
6a5a645e 1896static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
9c80d176
SZ
1897{
1898 u32 fwsm;
1899
6a5a645e 1900 DEBUGFUNC("e1000_check_mng_mode_pchlan");
9c80d176
SZ
1901
1902 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1903
6a5a645e
SZ
1904 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1905 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
9c80d176
SZ
1906}
1907
1908/**
6a5a645e 1909 * e1000_rar_set_pch2lan - Set receive address register
9c80d176 1910 * @hw: pointer to the HW structure
6a5a645e
SZ
1911 * @addr: pointer to the receive address
1912 * @index: receive address array register
9c80d176 1913 *
6a5a645e
SZ
1914 * Sets the receive address array register at index to the address passed
1915 * in by addr. For 82579, RAR[0] is the base address register that is to
1916 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1917 * Use SHRA[0-3] in place of those reserved for ME.
9c80d176 1918 **/
4765c386 1919static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
9c80d176 1920{
6a5a645e 1921 u32 rar_low, rar_high;
9c80d176 1922
6a5a645e 1923 DEBUGFUNC("e1000_rar_set_pch2lan");
9c80d176 1924
379ebbe7 1925 /* HW expects these in little endian so we reverse the byte order
6a5a645e
SZ
1926 * from network order (big endian) to little endian
1927 */
1928 rar_low = ((u32) addr[0] |
4be59a01
SZ
1929 ((u32) addr[1] << 8) |
1930 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
9c80d176 1931
6a5a645e 1932 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
9c80d176 1933
6a5a645e
SZ
1934 /* If MAC address zero, no need to set the AV bit */
1935 if (rar_low || rar_high)
1936 rar_high |= E1000_RAH_AV;
9c80d176 1937
6a5a645e
SZ
1938 if (index == 0) {
1939 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1940 E1000_WRITE_FLUSH(hw);
1941 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1942 E1000_WRITE_FLUSH(hw);
4765c386 1943 return E1000_SUCCESS;
6a5a645e 1944 }
9c80d176 1945
4765c386
MN
1946 /* RAR[1-6] are owned by manageability. Skip those and program the
1947 * next address into the SHRA register array.
1948 */
1949 if (index < (u32) (hw->mac.rar_entry_count)) {
4be59a01
SZ
1950 s32 ret_val;
1951
1952 ret_val = e1000_acquire_swflag_ich8lan(hw);
1953 if (ret_val)
1954 goto out;
1955
6a5a645e
SZ
1956 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1957 E1000_WRITE_FLUSH(hw);
1958 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1959 E1000_WRITE_FLUSH(hw);
9c80d176 1960
4be59a01
SZ
1961 e1000_release_swflag_ich8lan(hw);
1962
6a5a645e
SZ
1963 /* verify the register updates */
1964 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1965 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
4765c386 1966 return E1000_SUCCESS;
9c80d176 1967
6a5a645e
SZ
1968 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1969 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1970 }
9c80d176 1971
4be59a01 1972out:
6a5a645e 1973 DEBUGOUT1("Failed to write receive address at index %d\n", index);
4765c386 1974 return -E1000_ERR_CONFIG;
6a5a645e 1975}
9c80d176 1976
379ebbe7
SZ
1977/**
1978 * e1000_rar_set_pch_lpt - Set receive address registers
1979 * @hw: pointer to the HW structure
1980 * @addr: pointer to the receive address
1981 * @index: receive address array register
1982 *
1983 * Sets the receive address register array at index to the address passed
1984 * in by addr. For LPT, RAR[0] is the base address register that is to
1985 * contain the MAC address. SHRA[0-10] are the shared receive address
1986 * registers that are shared between the Host and manageability engine (ME).
1987 **/
4765c386 1988static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
379ebbe7
SZ
1989{
1990 u32 rar_low, rar_high;
1991 u32 wlock_mac;
1992
1993 DEBUGFUNC("e1000_rar_set_pch_lpt");
1994
1995 /* HW expects these in little endian so we reverse the byte order
1996 * from network order (big endian) to little endian
1997 */
1998 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1999 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2000
2001 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2002
2003 /* If MAC address zero, no need to set the AV bit */
2004 if (rar_low || rar_high)
2005 rar_high |= E1000_RAH_AV;
2006
2007 if (index == 0) {
2008 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2009 E1000_WRITE_FLUSH(hw);
2010 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2011 E1000_WRITE_FLUSH(hw);
4765c386 2012 return E1000_SUCCESS;
379ebbe7
SZ
2013 }
2014
2015 /* The manageability engine (ME) can lock certain SHRAR registers that
2016 * it is using - those registers are unavailable for use.
2017 */
2018 if (index < hw->mac.rar_entry_count) {
2019 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2020 E1000_FWSM_WLOCK_MAC_MASK;
2021 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2022
2023 /* Check if all SHRAR registers are locked */
2024 if (wlock_mac == 1)
2025 goto out;
2026
2027 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2028 s32 ret_val;
2029
2030 ret_val = e1000_acquire_swflag_ich8lan(hw);
2031
2032 if (ret_val)
2033 goto out;
2034
2035 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2036 rar_low);
2037 E1000_WRITE_FLUSH(hw);
2038 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2039 rar_high);
2040 E1000_WRITE_FLUSH(hw);
2041
2042 e1000_release_swflag_ich8lan(hw);
2043
2044 /* verify the register updates */
2045 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2046 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
4765c386 2047 return E1000_SUCCESS;
379ebbe7
SZ
2048 }
2049 }
2050
2051out:
2052 DEBUGOUT1("Failed to write receive address at index %d\n", index);
4765c386 2053 return -E1000_ERR_CONFIG;
379ebbe7
SZ
2054}
2055
6d5e2922
SZ
2056/**
2057 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2058 * @hw: pointer to the HW structure
2059 * @mc_addr_list: array of multicast addresses to program
2060 * @mc_addr_count: number of multicast addresses to program
2061 *
2062 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2063 * The caller must have a packed mc_addr_list of multicast addresses.
2064 **/
2065static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
4be59a01
SZ
2066 u8 *mc_addr_list,
2067 u32 mc_addr_count)
6d5e2922
SZ
2068{
2069 u16 phy_reg = 0;
2070 int i;
2071 s32 ret_val;
2072
2073 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2074
2075 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2076
2077 ret_val = hw->phy.ops.acquire(hw);
2078 if (ret_val)
2079 return;
2080
2081 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2082 if (ret_val)
2083 goto release;
2084
2085 for (i = 0; i < hw->mac.mta_reg_count; i++) {
2086 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
4be59a01
SZ
2087 (u16)(hw->mac.mta_shadow[i] &
2088 0xFFFF));
6d5e2922 2089 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
4be59a01
SZ
2090 (u16)((hw->mac.mta_shadow[i] >> 16) &
2091 0xFFFF));
6d5e2922
SZ
2092 }
2093
2094 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2095
2096release:
2097 hw->phy.ops.release(hw);
2098}
2099
6a5a645e
SZ
2100/**
2101 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2102 * @hw: pointer to the HW structure
2103 *
2104 * Checks if firmware is blocking the reset of the PHY.
2105 * This is a function pointer entry point only called by
2106 * reset routines.
2107 **/
2108static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2109{
2110 u32 fwsm;
4765c386
MN
2111 bool blocked = FALSE;
2112 int i = 0;
9c80d176 2113
6a5a645e 2114 DEBUGFUNC("e1000_check_reset_block_ich8lan");
9c80d176 2115
4765c386
MN
2116 do {
2117 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2118 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2119 blocked = TRUE;
2120 msec_delay(10);
2121 continue;
2122 }
2123 blocked = FALSE;
2124 } while (blocked && (i++ < 10));
2125 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
9c80d176
SZ
2126}
2127
2128/**
6a5a645e 2129 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
9c80d176
SZ
2130 * @hw: pointer to the HW structure
2131 *
6a5a645e
SZ
2132 * Assumes semaphore already acquired.
2133 *
9c80d176 2134 **/
6a5a645e 2135static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
9c80d176 2136{
6a5a645e
SZ
2137 u16 phy_data;
2138 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
379ebbe7
SZ
2139 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2140 E1000_STRAP_SMT_FREQ_SHIFT;
2141 s32 ret_val;
9c80d176 2142
6a5a645e 2143 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
9c80d176 2144
6a5a645e 2145 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
9c80d176 2146 if (ret_val)
4be59a01 2147 return ret_val;
9c80d176 2148
6a5a645e
SZ
2149 phy_data &= ~HV_SMB_ADDR_MASK;
2150 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2151 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
6a5a645e 2152
379ebbe7
SZ
2153 if (hw->phy.type == e1000_phy_i217) {
2154 /* Restore SMBus frequency */
2155 if (freq--) {
2156 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2157 phy_data |= (freq & (1 << 0)) <<
2158 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2159 phy_data |= (freq & (1 << 1)) <<
2160 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2161 } else {
2162 DEBUGOUT("Unsupported SMB frequency in PHY\n");
2163 }
2164 }
2165
4be59a01 2166 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
6a5a645e
SZ
2167}
2168
2169/**
2170 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2171 * @hw: pointer to the HW structure
2172 *
2173 * SW should configure the LCD from the NVM extended configuration region
2174 * as a workaround for certain parts.
2175 **/
2176static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2177{
2178 struct e1000_phy_info *phy = &hw->phy;
2179 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2180 s32 ret_val = E1000_SUCCESS;
2181 u16 word_addr, reg_data, reg_addr, phy_page = 0;
2182
2183 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2184
379ebbe7 2185 /* Initialize the PHY from the NVM on ICH platforms. This
9c80d176
SZ
2186 * is needed due to an issue where the NVM configuration is
2187 * not properly autoloaded after power transitions.
2188 * Therefore, after each PHY reset, we will load the
2189 * configuration data out of the NVM manually.
2190 */
6a5a645e
SZ
2191 switch (hw->mac.type) {
2192 case e1000_ich8lan:
2193 if (phy->type != e1000_phy_igp_3)
2194 return ret_val;
2195
2196 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2197 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
9c80d176 2198 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
6a5a645e
SZ
2199 break;
2200 }
2201 /* Fall-thru */
2202 case e1000_pchlan:
2203 case e1000_pch2lan:
379ebbe7 2204 case e1000_pch_lpt:
524ce499 2205 case e1000_pch_spt:
6a5a645e
SZ
2206 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2207 break;
2208 default:
2209 return ret_val;
2210 }
9c80d176 2211
6a5a645e
SZ
2212 ret_val = hw->phy.ops.acquire(hw);
2213 if (ret_val)
2214 return ret_val;
2215
2216 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2217 if (!(data & sw_cfg_mask))
4be59a01 2218 goto release;
6a5a645e 2219
379ebbe7 2220 /* Make sure HW does not configure LCD from PHY
6a5a645e
SZ
2221 * extended configuration before SW configuration
2222 */
2223 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
4be59a01
SZ
2224 if ((hw->mac.type < e1000_pch2lan) &&
2225 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2226 goto release;
9c80d176 2227
6a5a645e
SZ
2228 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2229 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2230 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2231 if (!cnf_size)
4be59a01 2232 goto release;
9c80d176 2233
6a5a645e
SZ
2234 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2235 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2236
4be59a01
SZ
2237 if (((hw->mac.type == e1000_pchlan) &&
2238 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2239 (hw->mac.type > e1000_pchlan)) {
379ebbe7 2240 /* HW configures the SMBus address and LEDs when the
6a5a645e
SZ
2241 * OEM and LCD Write Enable bits are set in the NVM.
2242 * When both NVM bits are cleared, SW will configure
2243 * them instead.
9c80d176 2244 */
6a5a645e
SZ
2245 ret_val = e1000_write_smbus_addr(hw);
2246 if (ret_val)
4be59a01 2247 goto release;
9c80d176 2248
6a5a645e
SZ
2249 data = E1000_READ_REG(hw, E1000_LEDCTL);
2250 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2251 (u16)data);
2252 if (ret_val)
4be59a01 2253 goto release;
6a5a645e 2254 }
9c80d176 2255
6a5a645e
SZ
2256 /* Configure LCD from extended configuration region. */
2257
2258 /* cnf_base_addr is in DWORD */
2259 word_addr = (u16)(cnf_base_addr << 1);
2260
2261 for (i = 0; i < cnf_size; i++) {
2262 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2263 &reg_data);
2264 if (ret_val)
4be59a01 2265 goto release;
9c80d176 2266
6a5a645e
SZ
2267 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2268 1, &reg_addr);
2269 if (ret_val)
4be59a01 2270 goto release;
6a5a645e
SZ
2271
2272 /* Save off the PHY page for future writes. */
2273 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2274 phy_page = reg_data;
2275 continue;
2276 }
2277
2278 reg_addr &= PHY_REG_MASK;
2279 reg_addr |= phy_page;
2280
2281 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2282 reg_data);
2283 if (ret_val)
4be59a01 2284 goto release;
6a5a645e 2285 }
9c80d176 2286
4be59a01 2287release:
6a5a645e
SZ
2288 hw->phy.ops.release(hw);
2289 return ret_val;
2290}
9c80d176 2291
6a5a645e
SZ
2292/**
2293 * e1000_k1_gig_workaround_hv - K1 Si workaround
2294 * @hw: pointer to the HW structure
2295 * @link: link up bool flag
2296 *
2297 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2298 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
2299 * If link is down, the function will restore the default K1 setting located
2300 * in the NVM.
2301 **/
2302static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2303{
2304 s32 ret_val = E1000_SUCCESS;
2305 u16 status_reg = 0;
2306 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
9c80d176 2307
6a5a645e 2308 DEBUGFUNC("e1000_k1_gig_workaround_hv");
9c80d176 2309
6a5a645e 2310 if (hw->mac.type != e1000_pchlan)
4be59a01 2311 return E1000_SUCCESS;
9c80d176 2312
6a5a645e
SZ
2313 /* Wrap the whole flow with the sw flag */
2314 ret_val = hw->phy.ops.acquire(hw);
2315 if (ret_val)
4be59a01 2316 return ret_val;
6a5a645e
SZ
2317
2318 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2319 if (link) {
2320 if (hw->phy.type == e1000_phy_82578) {
2321 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
4be59a01 2322 &status_reg);
9c80d176 2323 if (ret_val)
6a5a645e 2324 goto release;
9c80d176 2325
4765c386
MN
2326 status_reg &= (BM_CS_STATUS_LINK_UP |
2327 BM_CS_STATUS_RESOLVED |
2328 BM_CS_STATUS_SPEED_MASK);
9c80d176 2329
6a5a645e 2330 if (status_reg == (BM_CS_STATUS_LINK_UP |
4be59a01
SZ
2331 BM_CS_STATUS_RESOLVED |
2332 BM_CS_STATUS_SPEED_1000))
6a5a645e
SZ
2333 k1_enable = FALSE;
2334 }
9c80d176 2335
6a5a645e
SZ
2336 if (hw->phy.type == e1000_phy_82577) {
2337 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
4be59a01 2338 &status_reg);
9c80d176 2339 if (ret_val)
6a5a645e
SZ
2340 goto release;
2341
4765c386
MN
2342 status_reg &= (HV_M_STATUS_LINK_UP |
2343 HV_M_STATUS_AUTONEG_COMPLETE |
2344 HV_M_STATUS_SPEED_MASK);
6a5a645e
SZ
2345
2346 if (status_reg == (HV_M_STATUS_LINK_UP |
4be59a01
SZ
2347 HV_M_STATUS_AUTONEG_COMPLETE |
2348 HV_M_STATUS_SPEED_1000))
6a5a645e 2349 k1_enable = FALSE;
9c80d176 2350 }
6a5a645e
SZ
2351
2352 /* Link stall fix for link up */
2353 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
4be59a01 2354 0x0100);
6a5a645e
SZ
2355 if (ret_val)
2356 goto release;
2357
2358 } else {
2359 /* Link stall fix for link down */
2360 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
4be59a01 2361 0x4100);
6a5a645e
SZ
2362 if (ret_val)
2363 goto release;
9c80d176
SZ
2364 }
2365
6a5a645e
SZ
2366 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2367
2368release:
2369 hw->phy.ops.release(hw);
4be59a01 2370
9c80d176
SZ
2371 return ret_val;
2372}
2373
2374/**
6a5a645e 2375 * e1000_configure_k1_ich8lan - Configure K1 power state
9c80d176 2376 * @hw: pointer to the HW structure
6a5a645e
SZ
2377 * @enable: K1 state to configure
2378 *
2379 * Configure the K1 power state based on the provided parameter.
2380 * Assumes semaphore already acquired.
9c80d176 2381 *
6a5a645e 2382 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
9c80d176 2383 **/
6a5a645e 2384s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
9c80d176 2385{
379ebbe7 2386 s32 ret_val;
6a5a645e
SZ
2387 u32 ctrl_reg = 0;
2388 u32 ctrl_ext = 0;
2389 u32 reg = 0;
2390 u16 kmrn_reg = 0;
9c80d176 2391
6a5a645e 2392 DEBUGFUNC("e1000_configure_k1_ich8lan");
9c80d176 2393
4be59a01
SZ
2394 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2395 &kmrn_reg);
6a5a645e 2396 if (ret_val)
4be59a01 2397 return ret_val;
6a5a645e
SZ
2398
2399 if (k1_enable)
2400 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2401 else
2402 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2403
4be59a01
SZ
2404 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2405 kmrn_reg);
6a5a645e 2406 if (ret_val)
4be59a01 2407 return ret_val;
6a5a645e
SZ
2408
2409 usec_delay(20);
2410 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2411 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2412
2413 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2414 reg |= E1000_CTRL_FRCSPD;
2415 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2416
2417 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
4be59a01 2418 E1000_WRITE_FLUSH(hw);
6a5a645e
SZ
2419 usec_delay(20);
2420 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2421 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4be59a01 2422 E1000_WRITE_FLUSH(hw);
6a5a645e
SZ
2423 usec_delay(20);
2424
4be59a01 2425 return E1000_SUCCESS;
6a5a645e
SZ
2426}
2427
2428/**
2429 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2430 * @hw: pointer to the HW structure
2431 * @d0_state: boolean if entering d0 or d3 device state
2432 *
2433 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2434 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2435 * in NVM determines whether HW should configure LPLU and Gbe Disable.
2436 **/
4be59a01 2437static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
6a5a645e
SZ
2438{
2439 s32 ret_val = 0;
2440 u32 mac_reg;
2441 u16 oem_reg;
2442
2443 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2444
4be59a01 2445 if (hw->mac.type < e1000_pchlan)
6a5a645e
SZ
2446 return ret_val;
2447
2448 ret_val = hw->phy.ops.acquire(hw);
2449 if (ret_val)
2450 return ret_val;
2451
4be59a01 2452 if (hw->mac.type == e1000_pchlan) {
6a5a645e
SZ
2453 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2454 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
4be59a01 2455 goto release;
9c80d176
SZ
2456 }
2457
6a5a645e
SZ
2458 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2459 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
4be59a01 2460 goto release;
6a5a645e
SZ
2461
2462 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2463
2464 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2465 if (ret_val)
4be59a01 2466 goto release;
6a5a645e
SZ
2467
2468 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2469
2470 if (d0_state) {
2471 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2472 oem_reg |= HV_OEM_BITS_GBE_DIS;
2473
2474 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2475 oem_reg |= HV_OEM_BITS_LPLU;
2476 } else {
4be59a01
SZ
2477 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2478 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
6a5a645e
SZ
2479 oem_reg |= HV_OEM_BITS_GBE_DIS;
2480
4be59a01
SZ
2481 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2482 E1000_PHY_CTRL_NOND0A_LPLU))
6a5a645e
SZ
2483 oem_reg |= HV_OEM_BITS_LPLU;
2484 }
4be59a01
SZ
2485
2486 /* Set Restart auto-neg to activate the bits */
2487 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2488 !hw->phy.ops.check_reset_block(hw))
6a5a645e 2489 oem_reg |= HV_OEM_BITS_RESTART_AN;
4be59a01 2490
6a5a645e
SZ
2491 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2492
4be59a01 2493release:
6a5a645e
SZ
2494 hw->phy.ops.release(hw);
2495
9c80d176
SZ
2496 return ret_val;
2497}
2498
6a5a645e 2499
6a5a645e
SZ
2500/**
2501 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2502 * @hw: pointer to the HW structure
2503 **/
2504static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
9c80d176 2505{
9c80d176
SZ
2506 s32 ret_val;
2507 u16 data;
9c80d176 2508
6a5a645e 2509 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
9c80d176 2510
6a5a645e 2511 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
9c80d176 2512 if (ret_val)
6a5a645e
SZ
2513 return ret_val;
2514
2515 data |= HV_KMRN_MDIO_SLOW;
2516
2517 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2518
2519 return ret_val;
2520}
2521
2522/**
2523 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2524 * done after every PHY reset.
2525 **/
2526static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2527{
2528 s32 ret_val = E1000_SUCCESS;
2529 u16 phy_data;
2530
2531 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2532
2533 if (hw->mac.type != e1000_pchlan)
4be59a01 2534 return E1000_SUCCESS;
9c80d176 2535
6a5a645e
SZ
2536 /* Set MDIO slow mode before any other MDIO access */
2537 if (hw->phy.type == e1000_phy_82577) {
2538 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2539 if (ret_val)
4be59a01 2540 return ret_val;
6a5a645e
SZ
2541 }
2542
6a5a645e
SZ
2543 if (((hw->phy.type == e1000_phy_82577) &&
2544 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2545 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2546 /* Disable generation of early preamble */
2547 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2548 if (ret_val)
4be59a01 2549 return ret_val;
6a5a645e
SZ
2550
2551 /* Preamble tuning for SSC */
4be59a01
SZ
2552 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2553 0xA204);
6a5a645e 2554 if (ret_val)
4be59a01 2555 return ret_val;
6a5a645e
SZ
2556 }
2557
2558 if (hw->phy.type == e1000_phy_82578) {
379ebbe7 2559 /* Return registers to default by doing a soft reset then
6a5a645e
SZ
2560 * writing 0x3140 to the control register.
2561 */
2562 if (hw->phy.revision < 2) {
2563 e1000_phy_sw_reset_generic(hw);
2564 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
4be59a01 2565 0x3140);
6a5a645e
SZ
2566 }
2567 }
2568
6a5a645e
SZ
2569 /* Select page 0 */
2570 ret_val = hw->phy.ops.acquire(hw);
2571 if (ret_val)
4be59a01 2572 return ret_val;
6a5a645e
SZ
2573
2574 hw->phy.addr = 1;
2575 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2576 hw->phy.ops.release(hw);
2577 if (ret_val)
4be59a01 2578 return ret_val;
6a5a645e 2579
379ebbe7 2580 /* Configure the K1 Si workaround during phy reset assuming there is
6a5a645e
SZ
2581 * link so that it disables K1 if link is in 1Gbps.
2582 */
2583 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2584 if (ret_val)
4be59a01 2585 return ret_val;
6a5a645e
SZ
2586
2587 /* Workaround for link disconnects on a busy hub in half duplex */
2588 ret_val = hw->phy.ops.acquire(hw);
2589 if (ret_val)
4be59a01
SZ
2590 return ret_val;
2591 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
6a5a645e
SZ
2592 if (ret_val)
2593 goto release;
4be59a01
SZ
2594 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2595 phy_data & 0x00FF);
379ebbe7
SZ
2596 if (ret_val)
2597 goto release;
2598
2599 /* set MSE higher to enable link to stay up when noise is high */
2600 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
6a5a645e
SZ
2601release:
2602 hw->phy.ops.release(hw);
4be59a01 2603
6a5a645e
SZ
2604 return ret_val;
2605}
2606
2607/**
2608 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2609 * @hw: pointer to the HW structure
2610 **/
2611void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2612{
2613 u32 mac_reg;
6d5e2922
SZ
2614 u16 i, phy_reg = 0;
2615 s32 ret_val;
6a5a645e
SZ
2616
2617 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2618
6d5e2922
SZ
2619 ret_val = hw->phy.ops.acquire(hw);
2620 if (ret_val)
2621 return;
2622 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2623 if (ret_val)
2624 goto release;
2625
4765c386
MN
2626 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2627 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
6a5a645e 2628 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
6d5e2922 2629 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
4be59a01 2630 (u16)(mac_reg & 0xFFFF));
6d5e2922 2631 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
4be59a01 2632 (u16)((mac_reg >> 16) & 0xFFFF));
6d5e2922 2633
6a5a645e 2634 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
6d5e2922 2635 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
4be59a01 2636 (u16)(mac_reg & 0xFFFF));
6d5e2922 2637 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
4be59a01
SZ
2638 (u16)((mac_reg & E1000_RAH_AV)
2639 >> 16));
6a5a645e 2640 }
6d5e2922
SZ
2641
2642 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2643
2644release:
2645 hw->phy.ops.release(hw);
6a5a645e
SZ
2646}
2647
2648static u32 e1000_calc_rx_da_crc(u8 mac[])
2649{
2650 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
2651 u32 i, j, mask, crc;
2652
2653 DEBUGFUNC("e1000_calc_rx_da_crc");
2654
2655 crc = 0xffffffff;
2656 for (i = 0; i < 6; i++) {
2657 crc = crc ^ mac[i];
2658 for (j = 8; j > 0; j--) {
2659 mask = (crc & 1) * (-1);
2660 crc = (crc >> 1) ^ (poly & mask);
2661 }
9c80d176 2662 }
6a5a645e
SZ
2663 return ~crc;
2664}
9c80d176 2665
6a5a645e
SZ
2666/**
2667 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2668 * with 82579 PHY
2669 * @hw: pointer to the HW structure
2670 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2671 **/
2672s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2673{
2674 s32 ret_val = E1000_SUCCESS;
2675 u16 phy_reg, data;
2676 u32 mac_reg;
2677 u16 i;
2678
2679 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2680
379ebbe7 2681 if (hw->mac.type < e1000_pch2lan)
4be59a01 2682 return E1000_SUCCESS;
6a5a645e
SZ
2683
2684 /* disable Rx path while enabling/disabling workaround */
2685 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
4be59a01
SZ
2686 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2687 phy_reg | (1 << 14));
9c80d176 2688 if (ret_val)
4be59a01 2689 return ret_val;
9c80d176 2690
6a5a645e 2691 if (enable) {
4765c386 2692 /* Write Rx addresses (rar_entry_count for RAL/H, and
6a5a645e
SZ
2693 * SHRAL/H) and initial CRC values to the MAC
2694 */
4765c386 2695 for (i = 0; i < hw->mac.rar_entry_count; i++) {
6a5a645e
SZ
2696 u8 mac_addr[ETH_ADDR_LEN] = {0};
2697 u32 addr_high, addr_low;
2698
2699 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2700 if (!(addr_high & E1000_RAH_AV))
2701 continue;
2702 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2703 mac_addr[0] = (addr_low & 0xFF);
2704 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2705 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2706 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2707 mac_addr[4] = (addr_high & 0xFF);
2708 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2709
2710 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2711 e1000_calc_rx_da_crc(mac_addr));
2712 }
2713
2714 /* Write Rx addresses to the PHY */
2715 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2716
2717 /* Enable jumbo frame workaround in the MAC */
2718 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2719 mac_reg &= ~(1 << 14);
2720 mac_reg |= (7 << 15);
2721 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2722
2723 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2724 mac_reg |= E1000_RCTL_SECRC;
2725 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2726
2727 ret_val = e1000_read_kmrn_reg_generic(hw,
2728 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2729 &data);
2730 if (ret_val)
4be59a01 2731 return ret_val;
6a5a645e
SZ
2732 ret_val = e1000_write_kmrn_reg_generic(hw,
2733 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2734 data | (1 << 0));
2735 if (ret_val)
4be59a01 2736 return ret_val;
6a5a645e
SZ
2737 ret_val = e1000_read_kmrn_reg_generic(hw,
2738 E1000_KMRNCTRLSTA_HD_CTRL,
2739 &data);
2740 if (ret_val)
4be59a01 2741 return ret_val;
6a5a645e
SZ
2742 data &= ~(0xF << 8);
2743 data |= (0xB << 8);
2744 ret_val = e1000_write_kmrn_reg_generic(hw,
2745 E1000_KMRNCTRLSTA_HD_CTRL,
2746 data);
2747 if (ret_val)
4be59a01 2748 return ret_val;
6a5a645e
SZ
2749
2750 /* Enable jumbo frame workaround in the PHY */
2751 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2752 data &= ~(0x7F << 5);
2753 data |= (0x37 << 5);
2754 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2755 if (ret_val)
4be59a01 2756 return ret_val;
6a5a645e
SZ
2757 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2758 data &= ~(1 << 13);
2759 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2760 if (ret_val)
4be59a01 2761 return ret_val;
6a5a645e
SZ
2762 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2763 data &= ~(0x3FF << 2);
4765c386 2764 data |= (E1000_TX_PTR_GAP << 2);
6a5a645e
SZ
2765 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2766 if (ret_val)
4be59a01
SZ
2767 return ret_val;
2768 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
6a5a645e 2769 if (ret_val)
4be59a01 2770 return ret_val;
6a5a645e 2771 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
4be59a01
SZ
2772 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2773 (1 << 10));
9c80d176 2774 if (ret_val)
4be59a01 2775 return ret_val;
9c80d176 2776 } else {
6a5a645e
SZ
2777 /* Write MAC register values back to h/w defaults */
2778 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2779 mac_reg &= ~(0xF << 14);
2780 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2781
2782 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2783 mac_reg &= ~E1000_RCTL_SECRC;
2784 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2785
2786 ret_val = e1000_read_kmrn_reg_generic(hw,
2787 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2788 &data);
2789 if (ret_val)
4be59a01 2790 return ret_val;
6a5a645e
SZ
2791 ret_val = e1000_write_kmrn_reg_generic(hw,
2792 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2793 data & ~(1 << 0));
2794 if (ret_val)
4be59a01 2795 return ret_val;
6a5a645e
SZ
2796 ret_val = e1000_read_kmrn_reg_generic(hw,
2797 E1000_KMRNCTRLSTA_HD_CTRL,
2798 &data);
2799 if (ret_val)
4be59a01 2800 return ret_val;
6a5a645e
SZ
2801 data &= ~(0xF << 8);
2802 data |= (0xB << 8);
2803 ret_val = e1000_write_kmrn_reg_generic(hw,
2804 E1000_KMRNCTRLSTA_HD_CTRL,
2805 data);
2806 if (ret_val)
4be59a01 2807 return ret_val;
6a5a645e
SZ
2808
2809 /* Write PHY register values back to h/w defaults */
2810 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2811 data &= ~(0x7F << 5);
2812 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2813 if (ret_val)
4be59a01 2814 return ret_val;
6a5a645e
SZ
2815 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2816 data |= (1 << 13);
2817 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2818 if (ret_val)
4be59a01 2819 return ret_val;
6a5a645e
SZ
2820 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2821 data &= ~(0x3FF << 2);
2822 data |= (0x8 << 2);
2823 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2824 if (ret_val)
4be59a01 2825 return ret_val;
6a5a645e
SZ
2826 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2827 if (ret_val)
4be59a01 2828 return ret_val;
6a5a645e 2829 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
4be59a01
SZ
2830 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2831 ~(1 << 10));
6a5a645e 2832 if (ret_val)
4be59a01 2833 return ret_val;
9c80d176
SZ
2834 }
2835
6a5a645e 2836 /* re-enable Rx path after enabling/disabling workaround */
4be59a01
SZ
2837 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2838 ~(1 << 14));
6a5a645e
SZ
2839}
2840
2841/**
2842 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2843 * done after every PHY reset.
2844 **/
2845static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2846{
2847 s32 ret_val = E1000_SUCCESS;
2848
2849 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2850
2851 if (hw->mac.type != e1000_pch2lan)
4be59a01 2852 return E1000_SUCCESS;
6a5a645e
SZ
2853
2854 /* Set MDIO slow mode before any other MDIO access */
2855 ret_val = e1000_set_mdio_slow_mode_hv(hw);
379ebbe7
SZ
2856 if (ret_val)
2857 return ret_val;
6a5a645e 2858
4be59a01
SZ
2859 ret_val = hw->phy.ops.acquire(hw);
2860 if (ret_val)
2861 return ret_val;
4be59a01 2862 /* set MSE higher to enable link to stay up when noise is high */
379ebbe7 2863 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
4be59a01
SZ
2864 if (ret_val)
2865 goto release;
2866 /* drop link after 5 times MSE threshold was reached */
379ebbe7 2867 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
4be59a01
SZ
2868release:
2869 hw->phy.ops.release(hw);
2870
6a5a645e
SZ
2871 return ret_val;
2872}
2873
2874/**
2875 * e1000_k1_gig_workaround_lv - K1 Si workaround
2876 * @hw: pointer to the HW structure
2877 *
4765c386
MN
2878 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2879 * Disable K1 for 1000 and 100 speeds
6a5a645e
SZ
2880 **/
2881static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2882{
2883 s32 ret_val = E1000_SUCCESS;
2884 u16 status_reg = 0;
6a5a645e
SZ
2885
2886 DEBUGFUNC("e1000_k1_workaround_lv");
2887
2888 if (hw->mac.type != e1000_pch2lan)
4be59a01 2889 return E1000_SUCCESS;
6a5a645e 2890
4765c386 2891 /* Set K1 beacon duration based on 10Mbs speed */
6a5a645e 2892 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
9c80d176 2893 if (ret_val)
4be59a01 2894 return ret_val;
9c80d176 2895
6a5a645e
SZ
2896 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2897 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
4765c386
MN
2898 if (status_reg &
2899 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
4be59a01
SZ
2900 u16 pm_phy_reg;
2901
4765c386 2902 /* LV 1G/100 Packet drop issue wa */
4be59a01
SZ
2903 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2904 &pm_phy_reg);
2905 if (ret_val)
2906 return ret_val;
4765c386 2907 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
4be59a01
SZ
2908 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2909 pm_phy_reg);
2910 if (ret_val)
2911 return ret_val;
2912 } else {
4765c386
MN
2913 u32 mac_reg;
2914 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2915 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
6a5a645e 2916 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
4765c386 2917 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
4be59a01 2918 }
6a5a645e 2919 }
9c80d176 2920
9c80d176
SZ
2921 return ret_val;
2922}
2923
2924/**
6a5a645e
SZ
2925 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2926 * @hw: pointer to the HW structure
6d5e2922 2927 * @gate: boolean set to TRUE to gate, FALSE to ungate
6a5a645e
SZ
2928 *
2929 * Gate/ungate the automatic PHY configuration via hardware; perform
2930 * the configuration via software instead.
2931 **/
2932static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2933{
2934 u32 extcnf_ctrl;
2935
2936 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2937
379ebbe7 2938 if (hw->mac.type < e1000_pch2lan)
6a5a645e
SZ
2939 return;
2940
2941 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2942
2943 if (gate)
2944 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2945 else
2946 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2947
2948 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
6a5a645e
SZ
2949}
2950
6a5a645e
SZ
2951/**
2952 * e1000_lan_init_done_ich8lan - Check for PHY config completion
9c80d176
SZ
2953 * @hw: pointer to the HW structure
2954 *
6a5a645e
SZ
2955 * Check the appropriate indication the MAC has finished configuring the
2956 * PHY after a software reset.
9c80d176 2957 **/
6a5a645e 2958static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
9c80d176 2959{
6a5a645e 2960 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
9c80d176 2961
6a5a645e
SZ
2962 DEBUGFUNC("e1000_lan_init_done_ich8lan");
2963
2964 /* Wait for basic configuration completes before proceeding */
2965 do {
2966 data = E1000_READ_REG(hw, E1000_STATUS);
2967 data &= E1000_STATUS_LAN_INIT_DONE;
2968 usec_delay(100);
2969 } while ((!data) && --loop);
9c80d176 2970
379ebbe7 2971 /* If basic configuration is incomplete before the above loop
6a5a645e
SZ
2972 * count reaches 0, loading the configuration from NVM will
2973 * leave the PHY in a bad state possibly resulting in no link.
9c80d176 2974 */
6a5a645e
SZ
2975 if (loop == 0)
2976 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2977
2978 /* Clear the Init Done bit for the next init event */
2979 data = E1000_READ_REG(hw, E1000_STATUS);
2980 data &= ~E1000_STATUS_LAN_INIT_DONE;
2981 E1000_WRITE_REG(hw, E1000_STATUS, data);
2982}
2983
2984/**
2985 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2986 * @hw: pointer to the HW structure
2987 **/
2988static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2989{
2990 s32 ret_val = E1000_SUCCESS;
2991 u16 reg;
2992
2993 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2994
2995 if (hw->phy.ops.check_reset_block(hw))
4be59a01 2996 return E1000_SUCCESS;
6a5a645e
SZ
2997
2998 /* Allow time for h/w to get to quiescent state after reset */
2999 msec_delay(10);
3000
3001 /* Perform any necessary post-reset workarounds */
3002 switch (hw->mac.type) {
3003 case e1000_pchlan:
3004 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3005 if (ret_val)
4be59a01 3006 return ret_val;
6a5a645e
SZ
3007 break;
3008 case e1000_pch2lan:
3009 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3010 if (ret_val)
4be59a01 3011 return ret_val;
6a5a645e
SZ
3012 break;
3013 default:
3014 break;
3015 }
3016
6d5e2922
SZ
3017 /* Clear the host wakeup bit after lcd reset */
3018 if (hw->mac.type >= e1000_pchlan) {
3019 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3020 reg &= ~BM_WUC_HOST_WU_BIT;
3021 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
6a5a645e
SZ
3022 }
3023
6a5a645e
SZ
3024 /* Configure the LCD with the extended configuration region in NVM */
3025 ret_val = e1000_sw_lcd_config_ich8lan(hw);
3026 if (ret_val)
4be59a01 3027 return ret_val;
6a5a645e
SZ
3028
3029 /* Configure the LCD with the OEM bits in NVM */
3030 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3031
6d5e2922
SZ
3032 if (hw->mac.type == e1000_pch2lan) {
3033 /* Ungate automatic PHY configuration on non-managed 82579 */
3034 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3035 E1000_ICH_FWSM_FW_VALID)) {
3036 msec_delay(10);
3037 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3038 }
3039
3040 /* Set EEE LPI Update Timer to 200usec */
3041 ret_val = hw->phy.ops.acquire(hw);
3042 if (ret_val)
4be59a01 3043 return ret_val;
379ebbe7
SZ
3044 ret_val = e1000_write_emi_reg_locked(hw,
3045 I82579_LPI_UPDATE_TIMER,
3046 0x1387);
6d5e2922 3047 hw->phy.ops.release(hw);
9c80d176
SZ
3048 }
3049
6a5a645e
SZ
3050 return ret_val;
3051}
3052
3053/**
3054 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3055 * @hw: pointer to the HW structure
3056 *
3057 * Resets the PHY
3058 * This is a function pointer entry point called by drivers
3059 * or other shared routines.
3060 **/
3061static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3062{
3063 s32 ret_val = E1000_SUCCESS;
3064
3065 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3066
3067 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3068 if ((hw->mac.type == e1000_pch2lan) &&
3069 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3070 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3071
3072 ret_val = e1000_phy_hw_reset_generic(hw);
3073 if (ret_val)
4be59a01 3074 return ret_val;
6a5a645e 3075
4be59a01 3076 return e1000_post_phy_reset_ich8lan(hw);
6a5a645e
SZ
3077}
3078
3079/**
3080 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3081 * @hw: pointer to the HW structure
3082 * @active: TRUE to enable LPLU, FALSE to disable
3083 *
3084 * Sets the LPLU state according to the active flag. For PCH, if OEM write
3085 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3086 * the phy speed. This function will manually set the LPLU bit and restart
3087 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
3088 * since it configures the same bit.
3089 **/
3090static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3091{
379ebbe7 3092 s32 ret_val;
6a5a645e
SZ
3093 u16 oem_reg;
3094
3095 DEBUGFUNC("e1000_set_lplu_state_pchlan");
9c80d176 3096
6a5a645e
SZ
3097 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3098 if (ret_val)
4be59a01 3099 return ret_val;
9c80d176 3100
6a5a645e
SZ
3101 if (active)
3102 oem_reg |= HV_OEM_BITS_LPLU;
3103 else
3104 oem_reg &= ~HV_OEM_BITS_LPLU;
3105
4be59a01
SZ
3106 if (!hw->phy.ops.check_reset_block(hw))
3107 oem_reg |= HV_OEM_BITS_RESTART_AN;
6a5a645e 3108
4be59a01 3109 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
9c80d176
SZ
3110}
3111
3112/**
3113 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3114 * @hw: pointer to the HW structure
3115 * @active: TRUE to enable LPLU, FALSE to disable
3116 *
3117 * Sets the LPLU D0 state according to the active flag. When
3118 * activating LPLU this function also disables smart speed
3119 * and vice versa. LPLU will not be activated unless the
3120 * device autonegotiation advertisement meets standards of
3121 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3122 * This is a function pointer entry point only called by
3123 * PHY setup routines.
3124 **/
3125static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3126{
3127 struct e1000_phy_info *phy = &hw->phy;
3128 u32 phy_ctrl;
3129 s32 ret_val = E1000_SUCCESS;
3130 u16 data;
3131
3132 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3133
3134 if (phy->type == e1000_phy_ife)
4be59a01 3135 return E1000_SUCCESS;
9c80d176
SZ
3136
3137 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3138
3139 if (active) {
3140 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3141 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3142
6a5a645e 3143 if (phy->type != e1000_phy_igp_3)
4be59a01 3144 return E1000_SUCCESS;
6a5a645e 3145
379ebbe7 3146 /* Call gig speed drop workaround on LPLU before accessing
9c80d176
SZ
3147 * any PHY registers
3148 */
6a5a645e 3149 if (hw->mac.type == e1000_ich8lan)
9c80d176
SZ
3150 e1000_gig_downshift_workaround_ich8lan(hw);
3151
3152 /* When LPLU is enabled, we should disable SmartSpeed */
3153 ret_val = phy->ops.read_reg(hw,
4be59a01
SZ
3154 IGP01E1000_PHY_PORT_CONFIG,
3155 &data);
379ebbe7
SZ
3156 if (ret_val)
3157 return ret_val;
9c80d176
SZ
3158 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3159 ret_val = phy->ops.write_reg(hw,
4be59a01
SZ
3160 IGP01E1000_PHY_PORT_CONFIG,
3161 data);
9c80d176 3162 if (ret_val)
4be59a01 3163 return ret_val;
9c80d176
SZ
3164 } else {
3165 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3166 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3167
6a5a645e 3168 if (phy->type != e1000_phy_igp_3)
4be59a01 3169 return E1000_SUCCESS;
6a5a645e 3170
379ebbe7 3171 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
9c80d176
SZ
3172 * during Dx states where the power conservation is most
3173 * important. During driver activity we should enable
3174 * SmartSpeed, so performance is maintained.
3175 */
3176 if (phy->smart_speed == e1000_smart_speed_on) {
3177 ret_val = phy->ops.read_reg(hw,
4be59a01
SZ
3178 IGP01E1000_PHY_PORT_CONFIG,
3179 &data);
9c80d176 3180 if (ret_val)
4be59a01 3181 return ret_val;
9c80d176
SZ
3182
3183 data |= IGP01E1000_PSCFR_SMART_SPEED;
3184 ret_val = phy->ops.write_reg(hw,
4be59a01
SZ
3185 IGP01E1000_PHY_PORT_CONFIG,
3186 data);
9c80d176 3187 if (ret_val)
4be59a01 3188 return ret_val;
9c80d176
SZ
3189 } else if (phy->smart_speed == e1000_smart_speed_off) {
3190 ret_val = phy->ops.read_reg(hw,
4be59a01
SZ
3191 IGP01E1000_PHY_PORT_CONFIG,
3192 &data);
9c80d176 3193 if (ret_val)
4be59a01 3194 return ret_val;
9c80d176
SZ
3195
3196 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3197 ret_val = phy->ops.write_reg(hw,
4be59a01
SZ
3198 IGP01E1000_PHY_PORT_CONFIG,
3199 data);
9c80d176 3200 if (ret_val)
4be59a01 3201 return ret_val;
9c80d176
SZ
3202 }
3203 }
3204
4be59a01 3205 return E1000_SUCCESS;
9c80d176
SZ
3206}
3207
3208/**
3209 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3210 * @hw: pointer to the HW structure
3211 * @active: TRUE to enable LPLU, FALSE to disable
3212 *
3213 * Sets the LPLU D3 state according to the active flag. When
3214 * activating LPLU this function also disables smart speed
3215 * and vice versa. LPLU will not be activated unless the
3216 * device autonegotiation advertisement meets standards of
3217 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3218 * This is a function pointer entry point only called by
3219 * PHY setup routines.
3220 **/
3221static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3222{
3223 struct e1000_phy_info *phy = &hw->phy;
3224 u32 phy_ctrl;
3225 s32 ret_val = E1000_SUCCESS;
3226 u16 data;
3227
3228 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3229
3230 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3231
3232 if (!active) {
3233 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3234 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
6a5a645e
SZ
3235
3236 if (phy->type != e1000_phy_igp_3)
4be59a01 3237 return E1000_SUCCESS;
6a5a645e 3238
379ebbe7 3239 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
9c80d176
SZ
3240 * during Dx states where the power conservation is most
3241 * important. During driver activity we should enable
3242 * SmartSpeed, so performance is maintained.
3243 */
3244 if (phy->smart_speed == e1000_smart_speed_on) {
3245 ret_val = phy->ops.read_reg(hw,
4be59a01
SZ
3246 IGP01E1000_PHY_PORT_CONFIG,
3247 &data);
9c80d176 3248 if (ret_val)
4be59a01 3249 return ret_val;
9c80d176
SZ
3250
3251 data |= IGP01E1000_PSCFR_SMART_SPEED;
3252 ret_val = phy->ops.write_reg(hw,
4be59a01
SZ
3253 IGP01E1000_PHY_PORT_CONFIG,
3254 data);
9c80d176 3255 if (ret_val)
4be59a01 3256 return ret_val;
9c80d176
SZ
3257 } else if (phy->smart_speed == e1000_smart_speed_off) {
3258 ret_val = phy->ops.read_reg(hw,
4be59a01
SZ
3259 IGP01E1000_PHY_PORT_CONFIG,
3260 &data);
9c80d176 3261 if (ret_val)
4be59a01 3262 return ret_val;
9c80d176
SZ
3263
3264 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3265 ret_val = phy->ops.write_reg(hw,
4be59a01
SZ
3266 IGP01E1000_PHY_PORT_CONFIG,
3267 data);
9c80d176 3268 if (ret_val)
4be59a01 3269 return ret_val;
9c80d176
SZ
3270 }
3271 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
4be59a01
SZ
3272 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3273 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
9c80d176
SZ
3274 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3275 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3276
6a5a645e 3277 if (phy->type != e1000_phy_igp_3)
4be59a01 3278 return E1000_SUCCESS;
6a5a645e 3279
379ebbe7 3280 /* Call gig speed drop workaround on LPLU before accessing
9c80d176
SZ
3281 * any PHY registers
3282 */
6a5a645e 3283 if (hw->mac.type == e1000_ich8lan)
9c80d176
SZ
3284 e1000_gig_downshift_workaround_ich8lan(hw);
3285
3286 /* When LPLU is enabled, we should disable SmartSpeed */
3287 ret_val = phy->ops.read_reg(hw,
4be59a01
SZ
3288 IGP01E1000_PHY_PORT_CONFIG,
3289 &data);
9c80d176 3290 if (ret_val)
4be59a01 3291 return ret_val;
9c80d176
SZ
3292
3293 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3294 ret_val = phy->ops.write_reg(hw,
4be59a01
SZ
3295 IGP01E1000_PHY_PORT_CONFIG,
3296 data);
9c80d176
SZ
3297 }
3298
9c80d176
SZ
3299 return ret_val;
3300}
3301
3302/**
3303 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3304 * @hw: pointer to the HW structure
3305 * @bank: pointer to the variable that returns the active bank
3306 *
3307 * Reads signature byte from the NVM using the flash access registers.
6a5a645e 3308 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
9c80d176
SZ
3309 **/
3310static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3311{
6a5a645e 3312 u32 eecd;
9c80d176 3313 struct e1000_nvm_info *nvm = &hw->nvm;
9c80d176
SZ
3314 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3315 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
6a5a645e 3316 u8 sig_byte = 0;
4be59a01 3317 s32 ret_val;
9c80d176 3318
6a5a645e 3319 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
9c80d176 3320
6a5a645e 3321 switch (hw->mac.type) {
524ce499
SZ
3322 case e1000_pch_spt:
3323 *bank = E1000_READ_REG(hw, E1000_CTRL_EXT) & E1000_CTRL_EXT_NVMVS;
3324 if (*bank == 0 || *bank == 1) {
3325 return -E1000_ERR_NVM;
3326 } else {
3327 *bank = *bank - 2;
3328 return 0;
3329 }
3330 break;
6a5a645e
SZ
3331 case e1000_ich8lan:
3332 case e1000_ich9lan:
3333 eecd = E1000_READ_REG(hw, E1000_EECD);
3334 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3335 E1000_EECD_SEC1VAL_VALID_MASK) {
3336 if (eecd & E1000_EECD_SEC1VAL)
9c80d176 3337 *bank = 1;
6a5a645e
SZ
3338 else
3339 *bank = 0;
3340
4be59a01 3341 return E1000_SUCCESS;
6a5a645e 3342 }
4be59a01 3343 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
6a5a645e
SZ
3344 /* fall-thru */
3345 default:
3346 /* set bank to 0 in case flash read fails */
3347 *bank = 0;
3348
3349 /* Check bank 0 */
3350 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
4be59a01 3351 &sig_byte);
6a5a645e 3352 if (ret_val)
4be59a01 3353 return ret_val;
6a5a645e
SZ
3354 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3355 E1000_ICH_NVM_SIG_VALUE) {
3356 *bank = 0;
4be59a01 3357 return E1000_SUCCESS;
6a5a645e
SZ
3358 }
3359
3360 /* Check bank 1 */
3361 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
4be59a01
SZ
3362 bank1_offset,
3363 &sig_byte);
6a5a645e 3364 if (ret_val)
4be59a01 3365 return ret_val;
6a5a645e
SZ
3366 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3367 E1000_ICH_NVM_SIG_VALUE) {
3368 *bank = 1;
4be59a01 3369 return E1000_SUCCESS;
9c80d176 3370 }
9c80d176 3371
6a5a645e 3372 DEBUGOUT("ERROR: No valid NVM bank present\n");
4be59a01 3373 return -E1000_ERR_NVM;
6a5a645e 3374 }
9c80d176
SZ
3375}
3376
524ce499
SZ
3377/**
3378 * e1000_read_nvm_spt - Read word(s) from the NVM
3379 * @hw: pointer to the HW structure
3380 * @offset: The offset (in bytes) of the word(s) to read.
3381 * @words: Size of data to read in words
3382 * @data: Pointer to the word(s) to read at offset.
3383 *
3384 * Reads a word(s) from the NVM using the flash access registers.
3385 **/
3386static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3387 u16 *data)
3388{
3389 struct e1000_nvm_info *nvm = &hw->nvm;
3390 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3391 u32 act_offset;
3392 s32 ret_val = E1000_SUCCESS;
3393 u32 bank = 0;
3394 u32 dword;
3395 u16 use_offset;
3396 u16 i;
3397
3398 DEBUGFUNC("e1000_read_nvm_spt");
3399
3400 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3401 (words == 0)) {
3402 DEBUGOUT("nvm parameter(s) out of bounds\n");
3403 ret_val = -E1000_ERR_NVM;
3404 goto out;
3405 }
3406
3407 nvm->ops.acquire(hw);
3408
3409 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3410 if (ret_val != E1000_SUCCESS) {
3411 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3412 bank = 0;
3413 }
3414
3415 act_offset = (bank) ? nvm->flash_bank_size : 0;
3416 act_offset += offset;
3417
3418 ret_val = E1000_SUCCESS;
3419 for (i = 0; i < words; i += 2) {
3420 if (words - i == 1) {
3421 if (dev_spec->shadow_ram[offset+i].modified) {
3422 data[i] = dev_spec->shadow_ram[offset+i].value;
3423 } else {
3424 use_offset = act_offset + i -
3425 (act_offset + i) % 2;
3426 ret_val = e1000_read_flash_dword_ich8lan(
3427 hw,
3428 use_offset,
3429 &dword);