/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/11/sys/dev/e1000/e1000_i210.c 330897 2018-03-14 03:19:51Z eadler $*/

#include "e1000_api.h"


static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);

/**
 *  e1000_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_i210");

	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);

	return ret_val;
}

/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_i210");

	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}

/**
 *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}
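
/*
 * Illustrative usage sketch, not part of the upstream driver: callers that
 * use the non-locking helper e1000_read_nvm_eerd() directly are expected to
 * bracket the access with the acquire/release pair above.  The helper name
 * below is hypothetical; the #if 0 guard keeps it out of the build.
 */
#if 0
static s32 example_locked_eerd_read(struct e1000_hw *hw, u16 offset, u16 *data)
{
	s32 ret_val;

	/* Take the SW/FW EEPROM semaphore before touching the NVM. */
	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
	if (ret_val != E1000_SUCCESS)
		return ret_val;

	ret_val = e1000_read_nvm_eerd(hw, offset, 1, data);

	/* Always drop the semaphore, even if the read failed. */
	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);

	return ret_val;
}
#endif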

/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = FALSE;
			e1000_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = E1000_READ_REG(hw, E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow Ram to read
 *  @words: number of words to read
 *  @data: word read from the Shadow Ram
 *
 *  Reads a 16 bit word from the Shadow Ram using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
			     u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_read_nvm_srrd_i210");

	/* We cannot hold the synchronization semaphores for too long,
	 * because of the forceful takeover procedure. However, it is more
	 * efficient to read in bursts than to synchronize access for each
	 * word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_read_nvm_eerd(hw, offset, count,
						     data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}
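
/*
 * Illustrative usage sketch, not part of the upstream driver: when a flash
 * device is present, nvm.ops.read points at e1000_read_nvm_srrd_i210() (see
 * e1000_init_nvm_params_i210() below), so a caller can read the three MAC
 * address words without handling the semaphores itself.  The helper name is
 * hypothetical; the #if 0 guard keeps it out of the build.
 */
#if 0
static s32 example_read_mac_words(struct e1000_hw *hw, u16 mac_words[3])
{
	/* NVM_MAC_ADDR is the word offset of the 3-word MAC address. */
	return hw->nvm.ops.read(hw, NVM_MAC_ADDR, 3, mac_words);
}
#endif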

/**
 *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and the Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If an error code is returned, the data and Shadow RAM may be inconsistent -
 *  the buffer may have been only partially written.
 **/
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_write_nvm_srwr_i210");

	/* We cannot hold the synchronization semaphores for too long,
	 * because of the forceful takeover procedure. However, it is more
	 * efficient to write in bursts than to synchronize access for each
	 * word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_write_nvm_srwr(hw, offset, count,
						      data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}
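
/*
 * Illustrative usage sketch, not part of the upstream driver: as noted above,
 * a Shadow RAM write only reaches the flash (and yields a valid checksum)
 * once e1000_update_nvm_checksum_i210() runs afterwards.  The helper name is
 * hypothetical; the #if 0 guard keeps it out of the build.
 */
#if 0
static s32 example_write_nvm_word(struct e1000_hw *hw, u16 offset, u16 value)
{
	s32 ret_val;

	ret_val = e1000_write_nvm_srwr_i210(hw, offset, 1, &value);
	if (ret_val != E1000_SUCCESS)
		return ret_val;

	/* Recompute the checksum and commit the Shadow RAM to flash. */
	return e1000_update_nvm_checksum_i210(hw);
}
#endif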

/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 *  e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 **/
static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_word_i210");

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
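
/*
 * Illustrative usage sketch, not part of the upstream driver: the usual
 * pattern for OTP reads in this file is to fall back to a device default when
 * the word is not programmed (see e1000_read_invm_i210() and
 * e1000_pll_workaround_i210() below).  The helper name is hypothetical; the
 * #if 0 guard keeps it out of the build.
 */
#if 0
static u16 example_invm_word_or_default(struct e1000_hw *hw, u8 address,
					u16 def_val)
{
	u16 word;

	if (e1000_read_invm_word_i210(hw, address, &word) != E1000_SUCCESS)
		word = def_val;
	return word;
}
#endif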

/**
 *  e1000_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word offset (aka eeprom offset) to read
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 E1000_UNUSEDARG words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_invm_i210");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
						     &data[1]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
						     &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

491
492/**
493 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
494 *  @hw: pointer to the HW structure
495 *
496 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
497 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
498 **/
499s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
500{
501	s32 status = E1000_SUCCESS;
502	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
503
504	DEBUGFUNC("e1000_validate_nvm_checksum_i210");
505
506	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
507
		/*
		 * Replace the read function that grabs the semaphore with
		 * one that skips it; the semaphore is already held here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}


/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/**
 *  e1000_get_flash_presence_i210 - Check if flash device is detected.
 *  @hw: pointer to the HW structure
 *
 **/
bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = FALSE;

	DEBUGFUNC("e1000_get_flash_presence_i210");

	eec = E1000_READ_REG(hw, E1000_EECD);

	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = TRUE;

	return ret_val;
}

/**
 *  e1000_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 flup;

	DEBUGFUNC("e1000_update_flash_i210");

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		DEBUGOUT("Flash update time out\n");
		goto out;
	}

	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
	E1000_WRITE_REG(hw, E1000_EECD, flup);

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == E1000_SUCCESS)
		DEBUGOUT("Flash update complete\n");
	else
		DEBUGOUT("Flash update time out\n");

out:
	return ret_val;
}

/**
 *  e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	DEBUGFUNC("e1000_pool_flash_update_done_i210");

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = E1000_READ_REG(hw, E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = E1000_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	return ret_val;
}

/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.
 **/
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i210");

	ret_val = e1000_init_nvm_params_82575(hw);
	nvm->ops.acquire = e1000_acquire_nvm_i210;
	nvm->ops.release = e1000_release_nvm_i210;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
	if (e1000_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read    = e1000_read_nvm_srrd_i210;
		nvm->ops.write   = e1000_write_nvm_srwr_i210;
		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
		nvm->ops.update   = e1000_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read     = e1000_read_invm_i210;
		nvm->ops.write    = e1000_null_write_nvm;
		nvm->ops.validate = e1000_null_ops_generic;
		nvm->ops.update   = e1000_null_ops_generic;
	}
	return ret_val;
}

/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
	e1000_init_function_pointers_82575(hw);
	hw->nvm.ops.init_params = e1000_init_nvm_params_i210;

	return;
}
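
/*
 * Illustrative sketch, not part of the upstream driver: this routine is
 * normally reached through e1000_setup_init_funcs() in e1000_api.c, which
 * installs the MAC-family function pointers and then runs the init_params
 * hooks.  The sequence below only shows the order in which the i210 NVM
 * parameters end up initialized; the helper name is hypothetical and the
 * #if 0 guard keeps it out of the build.
 */
#if 0
static s32 example_setup_i210_nvm(struct e1000_hw *hw)
{
	/* Install the i210 init_params hook, then run it. */
	e1000_init_function_pointers_i210(hw);
	return hw->nvm.ops.init_params(hw);
}
#endif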

/**
 *  e1000_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_i210");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 *  __e1000_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 **/
static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				    u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_xmdio_reg");

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
							 dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}

/**
 *  e1000_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the EMI address
 **/
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	DEBUGFUNC("e1000_read_xmdio_reg");

	return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, TRUE);
}

/**
 *  e1000_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/
s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	DEBUGFUNC("e1000_write_xmdio_reg");

	return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, FALSE);
}
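
/*
 * Illustrative usage sketch, not part of the upstream driver: a
 * read-modify-write of an XMDIO (MMD) register through the two wrappers
 * above.  The device address, register offset, and bit mask are left to the
 * caller; the helper name is hypothetical and the #if 0 guard keeps it out of
 * the build.
 */
#if 0
static s32 example_xmdio_set_bits(struct e1000_hw *hw, u16 addr, u8 dev_addr,
				  u16 set_mask)
{
	u16 reg;
	s32 ret_val;

	ret_val = e1000_read_xmdio_reg(hw, addr, dev_addr, &reg);
	if (ret_val != E1000_SUCCESS)
		return ret_val;

	reg |= set_mask;
	return e1000_write_xmdio_reg(hw, addr, dev_addr, reg);
}
#endif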

/**
 *  e1000_pll_workaround_i210
 *  @hw: pointer to the HW structure
 *
 *  Works around an erratum in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.
 **/
static s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = E1000_READ_REG(hw, E1000_WUC);
	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					    &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
					 E1000_PHY_PLL_FREQ_REG), &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		E1000_WRITE_REG(hw, E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		msec_delay(1);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		E1000_WRITE_REG(hw, E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
	return ret_val;
}

/**
 *  e1000_get_cfg_done_i210 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register for the config done bit for
 *  completion status.  NOTE: EEPROM-less silicon will fail trying to read
 *  the config done bit, so the error is *ONLY* logged and E1000_SUCCESS is
 *  returned.  If we were to return an error, EEPROM-less silicon could not
 *  be reset or change link.
 **/
static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	DEBUGFUNC("e1000_get_cfg_done_i210");

	while (timeout) {
		if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
			break;
		msec_delay(1);
		timeout--;
	}
	if (!timeout)
		DEBUGOUT("MNG configuration cycle has not completed.\n");

	return E1000_SUCCESS;
}

/**
 *  e1000_init_hw_i210 - Init hw for I210/I211
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize hw for i210 hw family.
 **/
s32 e1000_init_hw_i210(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_init_hw_i210");
	if ((hw->mac.type >= e1000_i210) &&
	    !(e1000_get_flash_presence_i210(hw))) {
		ret_val = e1000_pll_workaround_i210(hw);
		if (ret_val != E1000_SUCCESS)
			return ret_val;
	}
	hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;
	ret_val = e1000_init_hw_82575(hw);
	return ret_val;
}