/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "e1000_api.h"


static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
                                u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);

/**
 *  e1000_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
{
        s32 ret_val;

        DEBUGFUNC("e1000_acquire_nvm_i210");

        ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);

        return ret_val;
}

/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
        DEBUGFUNC("e1000_release_nvm_i210");

        e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
        u32 swfw_sync;
        u32 swmask = mask;
        u32 fwmask = mask << 16;
        s32 ret_val = E1000_SUCCESS;
        s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

        DEBUGFUNC("e1000_acquire_swfw_sync_i210");

        while (i < timeout) {
                if (e1000_get_hw_semaphore_i210(hw)) {
                        ret_val = -E1000_ERR_SWFW_SYNC;
                        goto out;
                }

                swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
                if (!(swfw_sync & (fwmask | swmask)))
                        break;

                /*
                 * Firmware currently using resource (fwmask)
                 * or other software thread using resource (swmask)
                 */
                e1000_put_hw_semaphore_generic(hw);
                msec_delay_irq(5);
                i++;
        }

        if (i == timeout) {
                DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
                ret_val = -E1000_ERR_SWFW_SYNC;
                goto out;
        }

        swfw_sync |= swmask;
        E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

        e1000_put_hw_semaphore_generic(hw);

out:
        return ret_val;
}

/**
 *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
        u32 swfw_sync;

        DEBUGFUNC("e1000_release_swfw_sync_i210");

        while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
                ; /* Empty */

        swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
        swfw_sync &= ~mask;
        E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

        e1000_put_hw_semaphore_generic(hw);
}

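/*
 * Illustrative usage sketch (not part of the driver): callers bracket
 * every shared PHY/NVM access with the acquire/release helpers above and
 * must check the acquire return value, since firmware or the other port
 * may hold the resource.  E1000_SWFW_EEP_SM is the mask used for EEPROM
 * access in this file; other masks select the per-port PHY semaphores.
 *
 *        if (e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM) !=
 *            E1000_SUCCESS)
 *                return -E1000_ERR_SWFW_SYNC;
 *        ... access the shared EEPROM resource ...
 *        e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
 */
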
/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
        u32 swsm;
        s32 timeout = hw->nvm.word_size + 1;
        s32 i = 0;

        DEBUGFUNC("e1000_get_hw_semaphore_i210");

        /* Get the SW semaphore */
        while (i < timeout) {
                swsm = E1000_READ_REG(hw, E1000_SWSM);
                if (!(swsm & E1000_SWSM_SMBI))
                        break;

                usec_delay(50);
                i++;
        }

        if (i == timeout) {
                /* In rare circumstances, the SW semaphore may already be held
                 * unintentionally. Clear the semaphore once before giving up.
                 */
                if (hw->dev_spec._82575.clear_semaphore_once) {
                        hw->dev_spec._82575.clear_semaphore_once = FALSE;
                        e1000_put_hw_semaphore_generic(hw);
                        for (i = 0; i < timeout; i++) {
                                swsm = E1000_READ_REG(hw, E1000_SWSM);
                                if (!(swsm & E1000_SWSM_SMBI))
                                        break;

                                usec_delay(50);
                        }
                }

                /* If we do not have the semaphore here, we have to give up. */
                if (i == timeout) {
                        DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
                        return -E1000_ERR_NVM;
                }
        }

        /* Get the FW semaphore. */
        for (i = 0; i < timeout; i++) {
                swsm = E1000_READ_REG(hw, E1000_SWSM);
                E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

                /* Semaphore acquired if bit latched */
                if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
                        break;

                usec_delay(50);
        }

        if (i == timeout) {
                /* Release semaphores */
                e1000_put_hw_semaphore_generic(hw);
                DEBUGOUT("Driver can't access the NVM\n");
                return -E1000_ERR_NVM;
        }

        return E1000_SUCCESS;
}

/**
 *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow Ram to read
 *  @words: number of words to read
 *  @data: word read from the Shadow Ram
 *
 *  Reads a 16 bit word from the Shadow Ram using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
                             u16 *data)
{
        s32 status = E1000_SUCCESS;
        u16 i, count;

        DEBUGFUNC("e1000_read_nvm_srrd_i210");

        /* We cannot hold the synchronization semaphores for too long,
         * because of the forceful takeover procedure. However, it is more
         * efficient to read in bursts than to synchronize access for each
         * word. */
        for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
                count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
                        E1000_EERD_EEWR_MAX_COUNT : (words - i);
                if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
                        status = e1000_read_nvm_eerd(hw, offset + i, count,
                                                     data + i);
                        hw->nvm.ops.release(hw);
                } else {
                        status = E1000_ERR_SWFW_SYNC;
                }

                if (status != E1000_SUCCESS)
                        break;
        }

        return status;
}

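/*
 * Illustrative usage sketch (not part of the driver): reading the three
 * MAC address words through the Shadow RAM read routine above.  The
 * routine takes and drops the NVM semaphore internally, so a caller only
 * checks the return status; NVM_MAC_ADDR is the same word offset used by
 * e1000_read_invm_i210() below.
 *
 *        u16 mac_words[3];
 *
 *        if (e1000_read_nvm_srrd_i210(hw, NVM_MAC_ADDR, 3, mac_words) !=
 *            E1000_SUCCESS)
 *                DEBUGOUT("Shadow RAM read of MAC address failed\n");
 */
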
/**
 *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and also Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
 *  partially written.
 **/
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
                              u16 *data)
{
        s32 status = E1000_SUCCESS;
        u16 i, count;

        DEBUGFUNC("e1000_write_nvm_srwr_i210");

        /* We cannot hold the synchronization semaphores for too long,
         * because of the forceful takeover procedure. However, it is more
         * efficient to write in bursts than to synchronize access for each
         * word. */
        for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
                count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
                        E1000_EERD_EEWR_MAX_COUNT : (words - i);
                if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
                        status = e1000_write_nvm_srwr(hw, offset + i, count,
                                                      data + i);
                        hw->nvm.ops.release(hw);
                } else {
                        status = E1000_ERR_SWFW_SYNC;
                }

                if (status != E1000_SUCCESS)
                        break;
        }

        return status;
}

/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
                                u16 *data)
{
        struct e1000_nvm_info *nvm = &hw->nvm;
        u32 i, k, eewr = 0;
        u32 attempts = 100000;
        s32 ret_val = E1000_SUCCESS;

        DEBUGFUNC("e1000_write_nvm_srwr");

        /*
         * A check for invalid values:  offset too large, too many words,
         * too many words for the offset, and not enough words.
         */
        if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
            (words == 0)) {
                DEBUGOUT("nvm parameter(s) out of bounds\n");
                ret_val = -E1000_ERR_NVM;
                goto out;
        }

        for (i = 0; i < words; i++) {
                eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
                        (data[i] << E1000_NVM_RW_REG_DATA) |
                        E1000_NVM_RW_REG_START;

                E1000_WRITE_REG(hw, E1000_SRWR, eewr);

                for (k = 0; k < attempts; k++) {
                        if (E1000_NVM_RW_REG_DONE &
                            E1000_READ_REG(hw, E1000_SRWR)) {
                                ret_val = E1000_SUCCESS;
                                break;
                        }
                        usec_delay(5);
                }

                if (ret_val != E1000_SUCCESS) {
                        DEBUGOUT("Shadow RAM write EEWR timed out\n");
                        break;
                }
        }

out:
        return ret_val;
}

/**
 *  e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 **/
static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
        s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
        u32 invm_dword;
        u16 i;
        u8 record_type, word_address;

        DEBUGFUNC("e1000_read_invm_word_i210");

        for (i = 0; i < E1000_INVM_SIZE; i++) {
                invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
                /* Get record type */
                record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
                if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
                        break;
                if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
                        i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
                if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
                        i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
                if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
                        word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
                        if (word_address == address) {
                                *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
                                DEBUGOUT2("Read INVM Word 0x%02x = %x",
                                          address, *data);
                                status = E1000_SUCCESS;
                                break;
                        }
                }
        }
        if (status != E1000_SUCCESS)
                DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
        return status;
}

/**
 *  e1000_read_invm_i210 - Read iNVM wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word offset (aka eeprom offset) to read
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
                                u16 E1000_UNUSEDARG words, u16 *data)
{
        s32 ret_val = E1000_SUCCESS;

        DEBUGFUNC("e1000_read_invm_i210");

        /* Only the MAC addr is required to be present in the iNVM */
        switch (offset) {
        case NVM_MAC_ADDR:
                ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
                ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
                                                     &data[1]);
                ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
                                                     &data[2]);
                if (ret_val != E1000_SUCCESS)
                        DEBUGOUT("MAC Addr not found in iNVM\n");
                break;
        case NVM_INIT_CTRL_2:
                ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = NVM_INIT_CTRL_2_DEFAULT_I211;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_INIT_CTRL_4:
                ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = NVM_INIT_CTRL_4_DEFAULT_I211;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_LED_1_CFG:
                ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = NVM_LED_1_CFG_DEFAULT_I211;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_LED_0_2_CFG:
                ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = NVM_LED_0_2_CFG_DEFAULT_I211;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_ID_LED_SETTINGS:
                ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = ID_LED_RESERVED_FFFF;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_SUB_DEV_ID:
                *data = hw->subsystem_device_id;
                break;
        case NVM_SUB_VEN_ID:
                *data = hw->subsystem_vendor_id;
                break;
        case NVM_DEV_ID:
                *data = hw->device_id;
                break;
        case NVM_VEN_ID:
                *data = hw->vendor_id;
                break;
        default:
                DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
                *data = NVM_RESERVED_WORD;
                break;
        }
        return ret_val;
}

/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
                            struct e1000_fw_version *invm_ver)
{
        u32 *record = NULL;
        u32 *next_record = NULL;
        u32 i = 0;
        u32 invm_dword = 0;
        u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
                                             E1000_INVM_RECORD_SIZE_IN_BYTES);
        u32 buffer[E1000_INVM_SIZE];
        s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
        u16 version = 0;

        DEBUGFUNC("e1000_read_invm_version");

        /* Read iNVM memory */
        for (i = 0; i < E1000_INVM_SIZE; i++) {
                invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
                buffer[i] = invm_dword;
        }

        /* Read version number */
        for (i = 1; i < invm_blocks; i++) {
                record = &buffer[invm_blocks - i];
                next_record = &buffer[invm_blocks - i + 1];

                /* Check if we have first version location used */
                if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
                        version = 0;
                        status = E1000_SUCCESS;
                        break;
                }
                /* Check if we have second version location used */
                else if ((i == 1) &&
                         ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
                        version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
                        status = E1000_SUCCESS;
                        break;
                }
                /*
                 * Check if we have odd version location
                 * used and it is the last one used
                 */
                else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
                         ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
                         (i != 1))) {
                        version = (*next_record & E1000_INVM_VER_FIELD_TWO)
                                  >> 13;
                        status = E1000_SUCCESS;
                        break;
                }
                /*
                 * Check if we have even version location
                 * used and it is the last one used
                 */
                else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
                         ((*record & 0x3) == 0)) {
                        version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
                        status = E1000_SUCCESS;
                        break;
                }
        }

        if (status == E1000_SUCCESS) {
                invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
                                        >> E1000_INVM_MAJOR_SHIFT;
                invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
        }
        /* Read Image Type */
        for (i = 1; i < invm_blocks; i++) {
                record = &buffer[invm_blocks - i];
                next_record = &buffer[invm_blocks - i + 1];

                /* Check if we have image type in first location used */
                if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
                        invm_ver->invm_img_type = 0;
                        status = E1000_SUCCESS;
                        break;
                }
                /* Check if we have image type in the last location used */
                else if ((((*record & 0x3) == 0) &&
                         ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
                         ((((*record & 0x3) != 0) && (i != 1)))) {
                        invm_ver->invm_img_type =
                                (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
                        status = E1000_SUCCESS;
                        break;
                }
        }
        return status;
}

/**
 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
        s32 status = E1000_SUCCESS;
        s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

        DEBUGFUNC("e1000_validate_nvm_checksum_i210");

        if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

                /*
                 * Replace the read function that grabs the semaphore with
                 * one that skips it, since we already hold the semaphore
                 * here.
                 */
                read_op_ptr = hw->nvm.ops.read;
                hw->nvm.ops.read = e1000_read_nvm_eerd;

                status = e1000_validate_nvm_checksum_generic(hw);

                /* Revert original read operation. */
                hw->nvm.ops.read = read_op_ptr;

                hw->nvm.ops.release(hw);
        } else {
                status = E1000_ERR_SWFW_SYNC;
        }

        return status;
}


/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
        s32 ret_val;
        u16 checksum = 0;
        u16 i, nvm_data;

        DEBUGFUNC("e1000_update_nvm_checksum_i210");

        /*
         * Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails
         */
        ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
        if (ret_val != E1000_SUCCESS) {
                DEBUGOUT("EEPROM read failed\n");
                goto out;
        }

        if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
                /*
                 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
                 * because we do not want to take the synchronization
                 * semaphores twice here.
                 */

                for (i = 0; i < NVM_CHECKSUM_REG; i++) {
                        ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
                        if (ret_val) {
                                hw->nvm.ops.release(hw);
                                DEBUGOUT("NVM Read Error while updating checksum.\n");
                                goto out;
                        }
                        checksum += nvm_data;
                }
                checksum = (u16) NVM_SUM - checksum;
                ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
                                                &checksum);
                if (ret_val != E1000_SUCCESS) {
                        hw->nvm.ops.release(hw);
                        DEBUGOUT("NVM Write Error while updating checksum.\n");
                        goto out;
                }

                hw->nvm.ops.release(hw);

                ret_val = e1000_update_flash_i210(hw);
        } else {
                ret_val = E1000_ERR_SWFW_SYNC;
        }
out:
        return ret_val;
}

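/*
 * Illustrative usage sketch (not part of the driver): a Shadow RAM change
 * always pairs the word write with a checksum update so the data is
 * committed to flash with a valid checksum, as noted above for
 * e1000_write_nvm_srwr_i210().  The offset and value here are only
 * placeholders.
 *
 *        u16 word = 0x1234;
 *
 *        if (e1000_write_nvm_srwr_i210(hw, 0x20, 1, &word) ==
 *            E1000_SUCCESS)
 *                (void)e1000_update_nvm_checksum_i210(hw);
 */
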
/**
 *  e1000_get_flash_presence_i210 - Check if flash device is detected.
 *  @hw: pointer to the HW structure
 *
 **/
bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
{
        u32 eec = 0;
        bool ret_val = FALSE;

        DEBUGFUNC("e1000_get_flash_presence_i210");

        eec = E1000_READ_REG(hw, E1000_EECD);

        if (eec & E1000_EECD_FLASH_DETECTED_I210)
                ret_val = TRUE;

        return ret_val;
}

/**
 *  e1000_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
        s32 ret_val;
        u32 flup;

        DEBUGFUNC("e1000_update_flash_i210");

        ret_val = e1000_pool_flash_update_done_i210(hw);
        if (ret_val == -E1000_ERR_NVM) {
                DEBUGOUT("Flash update time out\n");
                goto out;
        }

        flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
        E1000_WRITE_REG(hw, E1000_EECD, flup);

        ret_val = e1000_pool_flash_update_done_i210(hw);
        if (ret_val == E1000_SUCCESS)
                DEBUGOUT("Flash update complete\n");
        else
                DEBUGOUT("Flash update time out\n");

out:
        return ret_val;
}

/**
 *  e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
{
        s32 ret_val = -E1000_ERR_NVM;
        u32 i, reg;

        DEBUGFUNC("e1000_pool_flash_update_done_i210");

        for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
                reg = E1000_READ_REG(hw, E1000_EECD);
                if (reg & E1000_EECD_FLUDONE_I210) {
                        ret_val = E1000_SUCCESS;
                        break;
                }
                usec_delay(5);
        }

        return ret_val;
}

/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.
 **/
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
        s32 ret_val;
        struct e1000_nvm_info *nvm = &hw->nvm;

        DEBUGFUNC("e1000_init_nvm_params_i210");

        ret_val = e1000_init_nvm_params_82575(hw);
        nvm->ops.acquire = e1000_acquire_nvm_i210;
        nvm->ops.release = e1000_release_nvm_i210;
        nvm->ops.valid_led_default = e1000_valid_led_default_i210;
        if (e1000_get_flash_presence_i210(hw)) {
                hw->nvm.type = e1000_nvm_flash_hw;
                nvm->ops.read    = e1000_read_nvm_srrd_i210;
                nvm->ops.write   = e1000_write_nvm_srwr_i210;
                nvm->ops.validate = e1000_validate_nvm_checksum_i210;
                nvm->ops.update   = e1000_update_nvm_checksum_i210;
        } else {
                hw->nvm.type = e1000_nvm_invm;
                nvm->ops.read     = e1000_read_invm_i210;
                nvm->ops.write    = e1000_null_write_nvm;
                nvm->ops.validate = e1000_null_ops_generic;
                nvm->ops.update   = e1000_null_ops_generic;
        }
        return ret_val;
}

/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
        e1000_init_function_pointers_82575(hw);
        hw->nvm.ops.init_params = e1000_init_nvm_params_i210;

        return;
}

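/*
 * Illustrative sketch (not part of the driver): after the function
 * pointers above are installed, setup code is expected to invoke the
 * init_params hook, which selects flash-backed or iNVM-backed NVM
 * operations based on e1000_get_flash_presence_i210().
 *
 *        e1000_init_function_pointers_i210(hw);
 *        if (hw->nvm.ops.init_params(hw) != E1000_SUCCESS)
 *                DEBUGOUT("NVM parameter init failed\n");
 */
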
/**
 *  e1000_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
        s32 ret_val;

        DEBUGFUNC("e1000_valid_led_default_i210");

        ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
        if (ret_val) {
                DEBUGOUT("NVM Read Error\n");
                goto out;
        }

        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
                switch (hw->phy.media_type) {
                case e1000_media_type_internal_serdes:
                        *data = ID_LED_DEFAULT_I210_SERDES;
                        break;
                case e1000_media_type_copper:
                default:
                        *data = ID_LED_DEFAULT_I210;
                        break;
                }
        }
out:
        return ret_val;
}

/**
 *  __e1000_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 **/
static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
                                    u8 dev_addr, u16 *data, bool read)
{
        s32 ret_val;

        DEBUGFUNC("__e1000_access_xmdio_reg");

        ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
        if (ret_val)
                return ret_val;

        ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
        if (ret_val)
                return ret_val;

        ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
                                                         dev_addr);
        if (ret_val)
                return ret_val;

        if (read)
                ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
        else
                ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
        if (ret_val)
                return ret_val;

        /* Recalibrate the device back to 0 */
        ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
        if (ret_val)
                return ret_val;

        return ret_val;
}

/**
 *  e1000_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the EMI address
 **/
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
        DEBUGFUNC("e1000_read_xmdio_reg");

        return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, TRUE);
}

/**
 *  e1000_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/
s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
        DEBUGFUNC("e1000_write_xmdio_reg");

        return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, FALSE);
}

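/*
 * Illustrative usage sketch (not part of the driver): reading and writing
 * an MMD register through the XMDIO wrappers above.  The device address
 * and register offset are placeholders taken from the PHY data sheet
 * (e.g. an EEE register in MMD 7); the bit being set is likewise only an
 * example.
 *
 *        u16 val;
 *
 *        if (e1000_read_xmdio_reg(hw, 0x3c, 7, &val) == E1000_SUCCESS) {
 *                val |= 0x0004;
 *                (void)e1000_write_xmdio_reg(hw, 0x3c, 7, val);
 *        }
 */
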
/**
 *  e1000_pll_workaround_i210
 *  @hw: pointer to the HW structure
 *
 *  Works around an erratum in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.
 **/
static s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
        s32 ret_val;
        u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
        u16 nvm_word, phy_word, pci_word, tmp_nvm;
        int i;

        /* Get and set needed register values */
        wuc = E1000_READ_REG(hw, E1000_WUC);
        mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
        reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
        E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

        /* Get data from NVM, or set default */
        ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
                                            &nvm_word);
        if (ret_val != E1000_SUCCESS)
                nvm_word = E1000_INVM_DEFAULT_AL;
        tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
        for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
                /* check current state directly from internal PHY */
                e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
                                         E1000_PHY_PLL_FREQ_REG), &phy_word);
                if ((phy_word & E1000_PHY_PLL_UNCONF)
                    != E1000_PHY_PLL_UNCONF) {
                        ret_val = E1000_SUCCESS;
                        break;
                } else {
                        ret_val = -E1000_ERR_PHY;
                }
                /* directly reset the internal PHY */
                ctrl = E1000_READ_REG(hw, E1000_CTRL);
                E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

                ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
                ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
                E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

                E1000_WRITE_REG(hw, E1000_WUC, 0);
                reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
                E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

                e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
                pci_word |= E1000_PCI_PMCSR_D3;
                e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
                msec_delay(1);
                pci_word &= ~E1000_PCI_PMCSR_D3;
                e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
                reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
                E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

                /* restore WUC register */
                E1000_WRITE_REG(hw, E1000_WUC, wuc);
        }
        /* restore MDICNFG setting */
        E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
        return ret_val;
}

/**
 *  e1000_init_hw_i210 - Init hw for I210/I211
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize hw for i210 hw family.
 **/
s32 e1000_init_hw_i210(struct e1000_hw *hw)
{
        s32 ret_val;

        DEBUGFUNC("e1000_init_hw_i210");
        if ((hw->mac.type >= e1000_i210) &&
            !(e1000_get_flash_presence_i210(hw))) {
                ret_val = e1000_pll_workaround_i210(hw);
                if (ret_val != E1000_SUCCESS)
                        return ret_val;
        }
        ret_val = e1000_init_hw_82575(hw);
        return ret_val;
}