/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
33/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 230775 2012-01-30 16:42:02Z jfv $*/
34
35#include "ixgbe_common.h"
36#include "ixgbe_phy.h"
37#include "ixgbe_api.h"
38
39static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
45					u16 count);
46static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
47static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50
51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
53					 u16 *san_mac_offset);
54static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
55static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
56static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
57static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
58static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
59			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
60static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
61static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
62					     u16 words, u16 *data);
63static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
64					      u16 words, u16 *data);
65static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
66						 u16 offset);
67
68/**
69 *  ixgbe_init_ops_generic - Inits function ptrs
70 *  @hw: pointer to the hardware structure
71 *
72 *  Initialize the function pointers.
73 **/
74s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
75{
76	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
77	struct ixgbe_mac_info *mac = &hw->mac;
78	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
79
80	DEBUGFUNC("ixgbe_init_ops_generic");
81
82	/* EEPROM */
83	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
84	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
85	if (eec & IXGBE_EEC_PRES) {
86		eeprom->ops.read = &ixgbe_read_eerd_generic;
87		eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
88	} else {
89		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
90		eeprom->ops.read_buffer =
91				 &ixgbe_read_eeprom_buffer_bit_bang_generic;
92	}
93	eeprom->ops.write = &ixgbe_write_eeprom_generic;
94	eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
95	eeprom->ops.validate_checksum =
96				      &ixgbe_validate_eeprom_checksum_generic;
97	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
98	eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
99
100	/* MAC */
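	/*
	 * Ops that are left NULL below (reset_hw, get_media_type, the VMDq
	 * and VFTA handlers, the link ops, ...) are device specific and are
	 * expected to be filled in by the MAC-family specific init code
	 * before they are used.
	 */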
101	mac->ops.init_hw = &ixgbe_init_hw_generic;
102	mac->ops.reset_hw = NULL;
103	mac->ops.start_hw = &ixgbe_start_hw_generic;
104	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
105	mac->ops.get_media_type = NULL;
106	mac->ops.get_supported_physical_layer = NULL;
107	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
108	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
109	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
110	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
111	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
112	mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
113	mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
114
115	/* LEDs */
116	mac->ops.led_on = &ixgbe_led_on_generic;
117	mac->ops.led_off = &ixgbe_led_off_generic;
118	mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
119	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
120
121	/* RAR, Multicast, VLAN */
122	mac->ops.set_rar = &ixgbe_set_rar_generic;
123	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
124	mac->ops.insert_mac_addr = NULL;
125	mac->ops.set_vmdq = NULL;
126	mac->ops.clear_vmdq = NULL;
127	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
128	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
129	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
130	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
131	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
132	mac->ops.clear_vfta = NULL;
133	mac->ops.set_vfta = NULL;
134	mac->ops.set_vlvf = NULL;
135	mac->ops.init_uta_tables = NULL;
136
137	/* Flow Control */
138	mac->ops.fc_enable = &ixgbe_fc_enable_generic;
139
140	/* Link */
141	mac->ops.get_link_capabilities = NULL;
142	mac->ops.setup_link = NULL;
143	mac->ops.check_link = NULL;
144
145	return IXGBE_SUCCESS;
146}
147
148/**
149 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
150 *  @hw: pointer to hardware structure
151 *
 *  Starts the hardware: sets the media type, clears the VLAN filter table and
 *  the on-chip statistics counters, sets the No Snoop Disable bit, configures
 *  flow control, and clears the adapter-stopped flag.  Transmit and receive
 *  units are left disabled and uninitialized.
156 **/
157s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
158{
159	u32 ctrl_ext;
160
161	DEBUGFUNC("ixgbe_start_hw_generic");
162
163	/* Set the media type */
164	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
165
166	/* PHY ops initialization must be done in reset_hw() */
167
168	/* Clear the VLAN filter table */
169	hw->mac.ops.clear_vfta(hw);
170
171	/* Clear statistics registers */
172	hw->mac.ops.clear_hw_cntrs(hw);
173
174	/* Set No Snoop Disable */
175	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
176	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
177	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
178	IXGBE_WRITE_FLUSH(hw);
179
180	/* Setup flow control */
181	ixgbe_setup_fc(hw, 0);
182
183	/* Clear adapter stopped flag */
184	hw->adapter_stopped = FALSE;
185
186	return IXGBE_SUCCESS;
187}
188
189/**
190 *  ixgbe_start_hw_gen2 - Init sequence for common device family
191 *  @hw: pointer to hw structure
192 *
193 * Performs the init sequence common to the second generation
194 * of 10 GbE devices.
195 * Devices in the second generation:
196 *     82599
197 *     X540
198 **/
199s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
200{
201	u32 i;
202	u32 regval;
203
204	/* Clear the rate limiters */
205	for (i = 0; i < hw->mac.max_tx_queues; i++) {
206		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
207		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
208	}
209	IXGBE_WRITE_FLUSH(hw);
210
211	/* Disable relaxed ordering */
212	for (i = 0; i < hw->mac.max_tx_queues; i++) {
213		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
214		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
215		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
216	}
217
218	for (i = 0; i < hw->mac.max_rx_queues; i++) {
219		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
220		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
221			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
222		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
223	}
224
225	return IXGBE_SUCCESS;
226}
227
228/**
229 *  ixgbe_init_hw_generic - Generic hardware initialization
230 *  @hw: pointer to hardware structure
231 *
232 *  Initialize the hardware by resetting the hardware, filling the bus info
233 *  structure and media type, clears all on chip counters, initializes receive
234 *  address registers, multicast table, VLAN filter table, calls routine to set
235 *  up link and flow control settings, and leaves transmit and receive units
236 *  disabled and uninitialized
237 **/
238s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
239{
240	s32 status;
241
242	DEBUGFUNC("ixgbe_init_hw_generic");
243
244	/* Reset the hardware */
245	status = hw->mac.ops.reset_hw(hw);
246
247	if (status == IXGBE_SUCCESS) {
248		/* Start the HW */
249		status = hw->mac.ops.start_hw(hw);
250	}
251
252	return status;
253}
254
255/**
256 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
257 *  @hw: pointer to hardware structure
258 *
259 *  Clears all hardware statistics counters by reading them from the hardware
260 *  Statistics counters are clear on read.
261 **/
262s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
263{
264	u16 i = 0;
265
266	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
267
268	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
269	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
270	IXGBE_READ_REG(hw, IXGBE_ERRBC);
271	IXGBE_READ_REG(hw, IXGBE_MSPDC);
272	for (i = 0; i < 8; i++)
273		IXGBE_READ_REG(hw, IXGBE_MPC(i));
274
275	IXGBE_READ_REG(hw, IXGBE_MLFC);
276	IXGBE_READ_REG(hw, IXGBE_MRFC);
277	IXGBE_READ_REG(hw, IXGBE_RLEC);
278	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
279	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
280	if (hw->mac.type >= ixgbe_mac_82599EB) {
281		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
282		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
283	} else {
284		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
285		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
286	}
287
288	for (i = 0; i < 8; i++) {
289		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
290		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
291		if (hw->mac.type >= ixgbe_mac_82599EB) {
292			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
293			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
294		} else {
295			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
296			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
297		}
298	}
299	if (hw->mac.type >= ixgbe_mac_82599EB)
300		for (i = 0; i < 8; i++)
301			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
302	IXGBE_READ_REG(hw, IXGBE_PRC64);
303	IXGBE_READ_REG(hw, IXGBE_PRC127);
304	IXGBE_READ_REG(hw, IXGBE_PRC255);
305	IXGBE_READ_REG(hw, IXGBE_PRC511);
306	IXGBE_READ_REG(hw, IXGBE_PRC1023);
307	IXGBE_READ_REG(hw, IXGBE_PRC1522);
308	IXGBE_READ_REG(hw, IXGBE_GPRC);
309	IXGBE_READ_REG(hw, IXGBE_BPRC);
310	IXGBE_READ_REG(hw, IXGBE_MPRC);
311	IXGBE_READ_REG(hw, IXGBE_GPTC);
312	IXGBE_READ_REG(hw, IXGBE_GORCL);
313	IXGBE_READ_REG(hw, IXGBE_GORCH);
314	IXGBE_READ_REG(hw, IXGBE_GOTCL);
315	IXGBE_READ_REG(hw, IXGBE_GOTCH);
316	if (hw->mac.type == ixgbe_mac_82598EB)
317		for (i = 0; i < 8; i++)
318			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
319	IXGBE_READ_REG(hw, IXGBE_RUC);
320	IXGBE_READ_REG(hw, IXGBE_RFC);
321	IXGBE_READ_REG(hw, IXGBE_ROC);
322	IXGBE_READ_REG(hw, IXGBE_RJC);
323	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
324	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
325	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
326	IXGBE_READ_REG(hw, IXGBE_TORL);
327	IXGBE_READ_REG(hw, IXGBE_TORH);
328	IXGBE_READ_REG(hw, IXGBE_TPR);
329	IXGBE_READ_REG(hw, IXGBE_TPT);
330	IXGBE_READ_REG(hw, IXGBE_PTC64);
331	IXGBE_READ_REG(hw, IXGBE_PTC127);
332	IXGBE_READ_REG(hw, IXGBE_PTC255);
333	IXGBE_READ_REG(hw, IXGBE_PTC511);
334	IXGBE_READ_REG(hw, IXGBE_PTC1023);
335	IXGBE_READ_REG(hw, IXGBE_PTC1522);
336	IXGBE_READ_REG(hw, IXGBE_MPTC);
337	IXGBE_READ_REG(hw, IXGBE_BPTC);
338	for (i = 0; i < 16; i++) {
339		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
340		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
341		if (hw->mac.type >= ixgbe_mac_82599EB) {
342			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
343			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
344			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
345			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
346			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
347		} else {
348			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
349			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
350		}
351	}
352
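	/*
	 * The X540 keeps additional error statistics (the PCRC8EC and LDPCEC
	 * counters) behind the PCS MDIO device, so read them through the PHY
	 * ops to clear them as well.
	 */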
353	if (hw->mac.type == ixgbe_mac_X540) {
354		if (hw->phy.id == 0)
355			ixgbe_identify_phy(hw);
356		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
357				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
358		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
359				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
360		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
361				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
362		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
363				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
364	}
365
366	return IXGBE_SUCCESS;
367}
368
369/**
370 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
371 *  @hw: pointer to hardware structure
372 *  @pba_num: stores the part number string from the EEPROM
373 *  @pba_num_size: part number string buffer length
374 *
375 *  Reads the part number string from the EEPROM.
376 **/
377s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
378				  u32 pba_num_size)
379{
380	s32 ret_val;
381	u16 data;
382	u16 pba_ptr;
383	u16 offset;
384	u16 length;
385
386	DEBUGFUNC("ixgbe_read_pba_string_generic");
387
388	if (pba_num == NULL) {
389		DEBUGOUT("PBA string buffer was null\n");
390		return IXGBE_ERR_INVALID_ARGUMENT;
391	}
392
393	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
394	if (ret_val) {
395		DEBUGOUT("NVM Read Error\n");
396		return ret_val;
397	}
398
399	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
400	if (ret_val) {
401		DEBUGOUT("NVM Read Error\n");
402		return ret_val;
403	}
404
	/*
	 * If data is not the pointer guard value, the PBA is stored in the
	 * legacy format: pba_ptr is actually the second data word of the PBA
	 * number and the two words decode directly into an ASCII string.
	 */
410	if (data != IXGBE_PBANUM_PTR_GUARD) {
411		DEBUGOUT("NVM PBA number is not stored as string\n");
412
413		/* we will need 11 characters to store the PBA */
414		if (pba_num_size < 11) {
415			DEBUGOUT("PBA string buffer too small\n");
416			return IXGBE_ERR_NO_SPACE;
417		}
418
419		/* extract hex string from data and pba_ptr */
420		pba_num[0] = (data >> 12) & 0xF;
421		pba_num[1] = (data >> 8) & 0xF;
422		pba_num[2] = (data >> 4) & 0xF;
423		pba_num[3] = data & 0xF;
424		pba_num[4] = (pba_ptr >> 12) & 0xF;
425		pba_num[5] = (pba_ptr >> 8) & 0xF;
426		pba_num[6] = '-';
427		pba_num[7] = 0;
428		pba_num[8] = (pba_ptr >> 4) & 0xF;
429		pba_num[9] = pba_ptr & 0xF;
430
431		/* put a null character on the end of our string */
432		pba_num[10] = '\0';
433
434		/* switch all the data but the '-' to hex char */
435		for (offset = 0; offset < 10; offset++) {
436			if (pba_num[offset] < 0xA)
437				pba_num[offset] += '0';
438			else if (pba_num[offset] < 0x10)
439				pba_num[offset] += 'A' - 0xA;
440		}
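		/*
		 * Example with hypothetical values: data = 0x1234 and
		 * pba_ptr = 0xAB56 decode to the string "1234AB-056".
		 */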
441
442		return IXGBE_SUCCESS;
443	}
444
445	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
446	if (ret_val) {
447		DEBUGOUT("NVM Read Error\n");
448		return ret_val;
449	}
450
451	if (length == 0xFFFF || length == 0) {
452		DEBUGOUT("NVM PBA number section invalid length\n");
453		return IXGBE_ERR_PBA_SECTION;
454	}
455
456	/* check if pba_num buffer is big enough */
457	if (pba_num_size  < (((u32)length * 2) - 1)) {
458		DEBUGOUT("PBA string buffer too small\n");
459		return IXGBE_ERR_NO_SPACE;
460	}
461
462	/* trim pba length from start of string */
463	pba_ptr++;
464	length--;
465
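	/*
	 * Each EEPROM word holds two characters of the PBA string, high byte
	 * first.
	 */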
466	for (offset = 0; offset < length; offset++) {
467		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
468		if (ret_val) {
469			DEBUGOUT("NVM Read Error\n");
470			return ret_val;
471		}
472		pba_num[offset * 2] = (u8)(data >> 8);
473		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
474	}
475	pba_num[offset * 2] = '\0';
476
477	return IXGBE_SUCCESS;
478}
479
480/**
481 *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
482 *  @hw: pointer to hardware structure
483 *  @pba_num: stores the part number from the EEPROM
484 *
485 *  Reads the part number from the EEPROM.
486 **/
487s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
488{
489	s32 ret_val;
490	u16 data;
491
492	DEBUGFUNC("ixgbe_read_pba_num_generic");
493
494	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
495	if (ret_val) {
496		DEBUGOUT("NVM Read Error\n");
497		return ret_val;
498	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
499		DEBUGOUT("NVM Not supported\n");
500		return IXGBE_NOT_IMPLEMENTED;
501	}
502	*pba_num = (u32)(data << 16);
503
504	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
505	if (ret_val) {
506		DEBUGOUT("NVM Read Error\n");
507		return ret_val;
508	}
509	*pba_num |= data;
510
511	return IXGBE_SUCCESS;
512}
513
514/**
515 *  ixgbe_get_mac_addr_generic - Generic get MAC address
516 *  @hw: pointer to hardware structure
517 *  @mac_addr: Adapter MAC address
518 *
519 *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
 *  Reads the adapter's MAC address from the first Receive Address Register
 *  (RAR0).
521 *  in order for the MAC address to have been loaded from the EEPROM into RAR0
522 **/
523s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
524{
525	u32 rar_high;
526	u32 rar_low;
527	u16 i;
528
529	DEBUGFUNC("ixgbe_get_mac_addr_generic");
530
531	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
532	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
533
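	/* RAL(0) holds the low four bytes of the address, RAH(0) the last two */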
534	for (i = 0; i < 4; i++)
535		mac_addr[i] = (u8)(rar_low >> (i*8));
536
537	for (i = 0; i < 2; i++)
538		mac_addr[i+4] = (u8)(rar_high >> (i*8));
539
540	return IXGBE_SUCCESS;
541}
542
543/**
544 *  ixgbe_get_bus_info_generic - Generic set PCI bus info
545 *  @hw: pointer to hardware structure
546 *
547 *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
548 **/
549s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
550{
551	struct ixgbe_mac_info *mac = &hw->mac;
552	u16 link_status;
553
554	DEBUGFUNC("ixgbe_get_bus_info_generic");
555
556	hw->bus.type = ixgbe_bus_type_pci_express;
557
558	/* Get the negotiated link width and speed from PCI config space */
559	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
560
561	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
562	case IXGBE_PCI_LINK_WIDTH_1:
563		hw->bus.width = ixgbe_bus_width_pcie_x1;
564		break;
565	case IXGBE_PCI_LINK_WIDTH_2:
566		hw->bus.width = ixgbe_bus_width_pcie_x2;
567		break;
568	case IXGBE_PCI_LINK_WIDTH_4:
569		hw->bus.width = ixgbe_bus_width_pcie_x4;
570		break;
571	case IXGBE_PCI_LINK_WIDTH_8:
572		hw->bus.width = ixgbe_bus_width_pcie_x8;
573		break;
574	default:
575		hw->bus.width = ixgbe_bus_width_unknown;
576		break;
577	}
578
579	switch (link_status & IXGBE_PCI_LINK_SPEED) {
580	case IXGBE_PCI_LINK_SPEED_2500:
581		hw->bus.speed = ixgbe_bus_speed_2500;
582		break;
583	case IXGBE_PCI_LINK_SPEED_5000:
584		hw->bus.speed = ixgbe_bus_speed_5000;
585		break;
586	default:
587		hw->bus.speed = ixgbe_bus_speed_unknown;
588		break;
589	}
590
591	mac->ops.set_lan_id(hw);
592
593	return IXGBE_SUCCESS;
594}
595
596/**
597 *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
598 *  @hw: pointer to the HW structure
599 *
600 *  Determines the LAN function id by reading memory-mapped registers
601 *  and swaps the port value if requested.
602 **/
603void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
604{
605	struct ixgbe_bus_info *bus = &hw->bus;
606	u32 reg;
607
608	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
609
610	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
611	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
612	bus->lan_id = bus->func;
613
614	/* check for a port swap */
615	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
616	if (reg & IXGBE_FACTPS_LFS)
617		bus->func ^= 0x1;
618}
619
620/**
621 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
622 *  @hw: pointer to hardware structure
623 *
624 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
625 *  disables transmit and receive units. The adapter_stopped flag is used by
626 *  the shared code and drivers to determine if the adapter is in a stopped
627 *  state and should not touch the hardware.
628 **/
629s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
630{
631	u32 reg_val;
632	u16 i;
633
634	DEBUGFUNC("ixgbe_stop_adapter_generic");
635
636	/*
637	 * Set the adapter_stopped flag so other driver functions stop touching
638	 * the hardware
639	 */
640	hw->adapter_stopped = TRUE;
641
642	/* Disable the receive unit */
643	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
644
645	/* Clear interrupt mask to stop interrupts from being generated */
646	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
647
648	/* Clear any pending interrupts, flush previous writes */
649	IXGBE_READ_REG(hw, IXGBE_EICR);
650
651	/* Disable the transmit unit.  Each queue must be disabled. */
652	for (i = 0; i < hw->mac.max_tx_queues; i++)
653		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
654
655	/* Disable the receive unit by stopping each queue */
656	for (i = 0; i < hw->mac.max_rx_queues; i++) {
657		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
658		reg_val &= ~IXGBE_RXDCTL_ENABLE;
659		reg_val |= IXGBE_RXDCTL_SWFLSH;
660		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
661	}
662
663	/* flush all queues disables */
664	IXGBE_WRITE_FLUSH(hw);
665	msec_delay(2);
666
	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
671	return ixgbe_disable_pcie_master(hw);
672}
673
674/**
675 *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
676 *  @hw: pointer to hardware structure
677 *  @index: led number to turn on
678 **/
679s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
680{
681	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
682
683	DEBUGFUNC("ixgbe_led_on_generic");
684
685	/* To turn on the LED, set mode to ON. */
686	led_reg &= ~IXGBE_LED_MODE_MASK(index);
687	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
688	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
689	IXGBE_WRITE_FLUSH(hw);
690
691	return IXGBE_SUCCESS;
692}
693
694/**
695 *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
696 *  @hw: pointer to hardware structure
697 *  @index: led number to turn off
698 **/
699s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
700{
701	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
702
703	DEBUGFUNC("ixgbe_led_off_generic");
704
705	/* To turn off the LED, set mode to OFF. */
706	led_reg &= ~IXGBE_LED_MODE_MASK(index);
707	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
708	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
709	IXGBE_WRITE_FLUSH(hw);
710
711	return IXGBE_SUCCESS;
712}
713
714/**
715 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
716 *  @hw: pointer to hardware structure
717 *
718 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
719 *  ixgbe_hw struct in order to set up EEPROM access.
720 **/
721s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
722{
723	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
724	u32 eec;
725	u16 eeprom_size;
726
727	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
728
729	if (eeprom->type == ixgbe_eeprom_uninitialized) {
730		eeprom->type = ixgbe_eeprom_none;
731		/* Set default semaphore delay to 10ms which is a well
732		 * tested value */
733		eeprom->semaphore_delay = 10;
734		/* Clear EEPROM page size, it will be initialized as needed */
735		eeprom->word_page_size = 0;
736
737		/*
738		 * Check for EEPROM present first.
739		 * If not present leave as none
740		 */
741		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
742		if (eec & IXGBE_EEC_PRES) {
743			eeprom->type = ixgbe_eeprom_spi;
744
745			/*
746			 * SPI EEPROM is assumed here.  This code would need to
747			 * change if a future EEPROM is not SPI.
748			 */
749			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
750					    IXGBE_EEC_SIZE_SHIFT);
751			eeprom->word_size = 1 << (eeprom_size +
752					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
753		}
754
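		/* EEC_ADDR_SIZE selects between 16-bit and 8-bit SPI addressing */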
755		if (eec & IXGBE_EEC_ADDR_SIZE)
756			eeprom->address_bits = 16;
757		else
758			eeprom->address_bits = 8;
759		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
760			  "%d\n", eeprom->type, eeprom->word_size,
761			  eeprom->address_bits);
762	}
763
764	return IXGBE_SUCCESS;
765}
766
767/**
768 *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
769 *  @hw: pointer to hardware structure
770 *  @offset: offset within the EEPROM to write
771 *  @words: number of word(s)
772 *  @data: 16 bit word(s) to write to EEPROM
773 *
 *  Writes 16 bit word(s) to the EEPROM through the bit-bang method
775 **/
776s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
777					       u16 words, u16 *data)
778{
779	s32 status = IXGBE_SUCCESS;
780	u16 i, count;
781
782	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
783
784	hw->eeprom.ops.init_params(hw);
785
786	if (words == 0) {
787		status = IXGBE_ERR_INVALID_ARGUMENT;
788		goto out;
789	}
790
791	if (offset + words > hw->eeprom.word_size) {
792		status = IXGBE_ERR_EEPROM;
793		goto out;
794	}
795
	/*
	 * The EEPROM page size cannot be queried from the chip, so it is
	 * detected lazily, and only when the buffer being written is large
	 * enough to make the detection worthwhile.
	 */
800	if ((hw->eeprom.word_page_size == 0) &&
801	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
802		ixgbe_detect_eeprom_page_size_generic(hw, offset);
803
	/*
	 * We cannot hold the synchronization semaphores for too long or we
	 * will starve other entities, but writing in bursts is still more
	 * efficient than re-synchronizing access for every word.
	 */
809	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
810		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
811			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
812		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
813							    count, &data[i]);
814
815		if (status != IXGBE_SUCCESS)
816			break;
817	}
818
819out:
820	return status;
821}
822
823/**
824 *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
825 *  @hw: pointer to hardware structure
826 *  @offset: offset within the EEPROM to be written to
827 *  @words: number of word(s)
828 *  @data: 16 bit word(s) to be written to the EEPROM
829 *
830 *  If ixgbe_eeprom_update_checksum is not called after this function, the
831 *  EEPROM will most likely contain an invalid checksum.
832 **/
833static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
834					      u16 words, u16 *data)
835{
836	s32 status;
837	u16 word;
838	u16 page_size;
839	u16 i;
840	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
841
842	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
843
844	/* Prepare the EEPROM for writing  */
845	status = ixgbe_acquire_eeprom(hw);
846
847	if (status == IXGBE_SUCCESS) {
848		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
849			ixgbe_release_eeprom(hw);
850			status = IXGBE_ERR_EEPROM;
851		}
852	}
853
854	if (status == IXGBE_SUCCESS) {
855		for (i = 0; i < words; i++) {
856			ixgbe_standby_eeprom(hw);
857
858			/*  Send the WRITE ENABLE command (8 bit opcode )  */
859			ixgbe_shift_out_eeprom_bits(hw,
860						   IXGBE_EEPROM_WREN_OPCODE_SPI,
861						   IXGBE_EEPROM_OPCODE_BITS);
862
863			ixgbe_standby_eeprom(hw);
864
865			/*
866			 * Some SPI eeproms use the 8th address bit embedded
867			 * in the opcode
868			 */
869			if ((hw->eeprom.address_bits == 8) &&
870			    ((offset + i) >= 128))
871				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
872
873			/* Send the Write command (8-bit opcode + addr) */
874			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
875						    IXGBE_EEPROM_OPCODE_BITS);
876			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
877						    hw->eeprom.address_bits);
878
879			page_size = hw->eeprom.word_page_size;
880
881			/* Send the data in burst via SPI*/
882			do {
883				word = data[i];
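				/*
				 * shift_out sends the MSB first, so swap the
				 * bytes to put the low byte of the word on
				 * the wire first
				 */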
884				word = (word >> 8) | (word << 8);
885				ixgbe_shift_out_eeprom_bits(hw, word, 16);
886
887				if (page_size == 0)
888					break;
889
890				/* do not wrap around page */
891				if (((offset + i) & (page_size - 1)) ==
892				    (page_size - 1))
893					break;
894			} while (++i < words);
895
896			ixgbe_standby_eeprom(hw);
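			/* allow the EEPROM's internal write cycle to finish */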
897			msec_delay(10);
898		}
899		/* Done with writing - release the EEPROM */
900		ixgbe_release_eeprom(hw);
901	}
902
903	return status;
904}
905
906/**
907 *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
908 *  @hw: pointer to hardware structure
909 *  @offset: offset within the EEPROM to be written to
910 *  @data: 16 bit word to be written to the EEPROM
911 *
912 *  If ixgbe_eeprom_update_checksum is not called after this function, the
913 *  EEPROM will most likely contain an invalid checksum.
914 **/
915s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
916{
917	s32 status;
918
919	DEBUGFUNC("ixgbe_write_eeprom_generic");
920
921	hw->eeprom.ops.init_params(hw);
922
923	if (offset >= hw->eeprom.word_size) {
924		status = IXGBE_ERR_EEPROM;
925		goto out;
926	}
927
928	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
929
930out:
931	return status;
932}
933
934/**
935 *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
936 *  @hw: pointer to hardware structure
937 *  @offset: offset within the EEPROM to be read
 *  @data: read 16 bit word(s) from EEPROM
939 *  @words: number of word(s)
940 *
941 *  Reads 16 bit word(s) from EEPROM through bit-bang method
942 **/
943s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
944					      u16 words, u16 *data)
945{
946	s32 status = IXGBE_SUCCESS;
947	u16 i, count;
948
949	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
950
951	hw->eeprom.ops.init_params(hw);
952
953	if (words == 0) {
954		status = IXGBE_ERR_INVALID_ARGUMENT;
955		goto out;
956	}
957
958	if (offset + words > hw->eeprom.word_size) {
959		status = IXGBE_ERR_EEPROM;
960		goto out;
961	}
962
	/*
	 * We cannot hold the synchronization semaphores for too long or we
	 * will starve other entities, but reading in bursts is still more
	 * efficient than re-synchronizing access for every word.
	 */
968	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
969		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
970			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
971
972		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
973							   count, &data[i]);
974
975		if (status != IXGBE_SUCCESS)
976			break;
977	}
978
979out:
980	return status;
981}
982
983/**
984 *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
985 *  @hw: pointer to hardware structure
986 *  @offset: offset within the EEPROM to be read
987 *  @words: number of word(s)
988 *  @data: read 16 bit word(s) from EEPROM
989 *
990 *  Reads 16 bit word(s) from EEPROM through bit-bang method
991 **/
992static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
993					     u16 words, u16 *data)
994{
995	s32 status;
996	u16 word_in;
997	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
998	u16 i;
999
1000	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1001
1002	/* Prepare the EEPROM for reading  */
1003	status = ixgbe_acquire_eeprom(hw);
1004
1005	if (status == IXGBE_SUCCESS) {
1006		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1007			ixgbe_release_eeprom(hw);
1008			status = IXGBE_ERR_EEPROM;
1009		}
1010	}
1011
1012	if (status == IXGBE_SUCCESS) {
1013		for (i = 0; i < words; i++) {
1014			ixgbe_standby_eeprom(hw);
1015			/*
1016			 * Some SPI eeproms use the 8th address bit embedded
1017			 * in the opcode
1018			 */
1019			if ((hw->eeprom.address_bits == 8) &&
1020			    ((offset + i) >= 128))
1021				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1022
1023			/* Send the READ command (opcode + addr) */
1024			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1025						    IXGBE_EEPROM_OPCODE_BITS);
1026			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1027						    hw->eeprom.address_bits);
1028
1029			/* Read the data. */
1030			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
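			/* the byte shifted in first belongs in the low byte */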
1031			data[i] = (word_in >> 8) | (word_in << 8);
1032		}
1033
1034		/* End this read operation */
1035		ixgbe_release_eeprom(hw);
1036	}
1037
1038	return status;
1039}
1040
1041/**
1042 *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1043 *  @hw: pointer to hardware structure
1044 *  @offset: offset within the EEPROM to be read
1045 *  @data: read 16 bit value from EEPROM
1046 *
1047 *  Reads 16 bit value from EEPROM through bit-bang method
1048 **/
1049s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1050				       u16 *data)
1051{
1052	s32 status;
1053
1054	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1055
1056	hw->eeprom.ops.init_params(hw);
1057
1058	if (offset >= hw->eeprom.word_size) {
1059		status = IXGBE_ERR_EEPROM;
1060		goto out;
1061	}
1062
1063	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1064
1065out:
1066	return status;
1067}
1068
1069/**
1070 *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1071 *  @hw: pointer to hardware structure
1072 *  @offset: offset of word in the EEPROM to read
1073 *  @words: number of word(s)
1074 *  @data: 16 bit word(s) from the EEPROM
1075 *
1076 *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
1077 **/
1078s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1079				   u16 words, u16 *data)
1080{
1081	u32 eerd;
1082	s32 status = IXGBE_SUCCESS;
1083	u32 i;
1084
1085	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1086
1087	hw->eeprom.ops.init_params(hw);
1088
1089	if (words == 0) {
1090		status = IXGBE_ERR_INVALID_ARGUMENT;
1091		goto out;
1092	}
1093
1094	if (offset >= hw->eeprom.word_size) {
1095		status = IXGBE_ERR_EEPROM;
1096		goto out;
1097	}
1098
1099	for (i = 0; i < words; i++) {
1100		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
1101		       IXGBE_EEPROM_RW_REG_START;
1102
1103		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1104		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1105
1106		if (status == IXGBE_SUCCESS) {
1107			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1108				   IXGBE_EEPROM_RW_REG_DATA);
1109		} else {
1110			DEBUGOUT("Eeprom read timed out\n");
1111			goto out;
1112		}
1113	}
1114out:
1115	return status;
1116}
1117
1118/**
1119 *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1120 *  @hw: pointer to hardware structure
1121 *  @offset: offset within the EEPROM to be used as a scratch pad
1122 *
 *  Discover the EEPROM page size by writing a marching pattern at the given
 *  offset.  This function is only called when a new large buffer is about to
 *  be written at that offset, so the data will be overwritten anyway.
1126 **/
1127static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1128						 u16 offset)
1129{
1130	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1131	s32 status = IXGBE_SUCCESS;
1132	u16 i;
1133
1134	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1135
1136	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1137		data[i] = i;
1138
1139	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1140	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1141					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1142	hw->eeprom.word_page_size = 0;
1143	if (status != IXGBE_SUCCESS)
1144		goto out;
1145
1146	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1147	if (status != IXGBE_SUCCESS)
1148		goto out;
1149
	/*
	 * When a burst write is longer than the actual page size, the
	 * EEPROM address wraps around to the start of the current page.
	 */
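	/*
	 * For example, assuming IXGBE_EEPROM_PAGE_SIZE_MAX is 128 words and
	 * the scratch offset happens to be page aligned: with a real 16 word
	 * page the final wrap writes the values 112..127 over that page, so
	 * the word at 'offset' reads back as 112 and the page size works out
	 * to 128 - 112 = 16.
	 */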
1154	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1155
1156	DEBUGOUT1("Detected EEPROM page size = %d words.",
1157		  hw->eeprom.word_page_size);
1158out:
1159	return status;
1160}
1161
1162/**
1163 *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1164 *  @hw: pointer to hardware structure
1165 *  @offset: offset of  word in the EEPROM to read
1166 *  @data: word read from the EEPROM
1167 *
1168 *  Reads a 16 bit word from the EEPROM using the EERD register.
1169 **/
1170s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1171{
1172	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1173}
1174
1175/**
1176 *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1177 *  @hw: pointer to hardware structure
1178 *  @offset: offset of  word in the EEPROM to write
1179 *  @words: number of word(s)
 *  @data: word(s) to write to the EEPROM
1181 *
1182 *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
1183 **/
1184s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1185				    u16 words, u16 *data)
1186{
1187	u32 eewr;
1188	s32 status = IXGBE_SUCCESS;
1189	u16 i;
1190
	DEBUGFUNC("ixgbe_write_eewr_buffer_generic");
1192
1193	hw->eeprom.ops.init_params(hw);
1194
1195	if (words == 0) {
1196		status = IXGBE_ERR_INVALID_ARGUMENT;
1197		goto out;
1198	}
1199
1200	if (offset >= hw->eeprom.word_size) {
1201		status = IXGBE_ERR_EEPROM;
1202		goto out;
1203	}
1204
1205	for (i = 0; i < words; i++) {
1206		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1207			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1208			IXGBE_EEPROM_RW_REG_START;
1209
1210		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1211		if (status != IXGBE_SUCCESS) {
1212			DEBUGOUT("Eeprom write EEWR timed out\n");
1213			goto out;
1214		}
1215
1216		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1217
1218		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1219		if (status != IXGBE_SUCCESS) {
1220			DEBUGOUT("Eeprom write EEWR timed out\n");
1221			goto out;
1222		}
1223	}
1224
1225out:
1226	return status;
1227}
1228
1229/**
1230 *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1231 *  @hw: pointer to hardware structure
1232 *  @offset: offset of  word in the EEPROM to write
 *  @data: word to write to the EEPROM
1234 *
1235 *  Write a 16 bit word to the EEPROM using the EEWR register.
1236 **/
1237s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1238{
1239	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1240}
1241
1242/**
1243 *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1244 *  @hw: pointer to hardware structure
1245 *  @ee_reg: EEPROM flag for polling
1246 *
1247 *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1248 *  read or write is done respectively.
1249 **/
1250s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1251{
1252	u32 i;
1253	u32 reg;
1254	s32 status = IXGBE_ERR_EEPROM;
1255
1256	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1257
1258	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1259		if (ee_reg == IXGBE_NVM_POLL_READ)
1260			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1261		else
1262			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1263
1264		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1265			status = IXGBE_SUCCESS;
1266			break;
1267		}
1268		usec_delay(5);
1269	}
1270	return status;
1271}
1272
1273/**
1274 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1275 *  @hw: pointer to hardware structure
1276 *
1277 *  Prepares EEPROM for access using bit-bang method. This function should
1278 *  be called before issuing a command to the EEPROM.
1279 **/
1280static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1281{
1282	s32 status = IXGBE_SUCCESS;
1283	u32 eec;
1284	u32 i;
1285
1286	DEBUGFUNC("ixgbe_acquire_eeprom");
1287
1288	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1289	    != IXGBE_SUCCESS)
1290		status = IXGBE_ERR_SWFW_SYNC;
1291
1292	if (status == IXGBE_SUCCESS) {
1293		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1294
1295		/* Request EEPROM Access */
1296		eec |= IXGBE_EEC_REQ;
1297		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1298
1299		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1300			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1301			if (eec & IXGBE_EEC_GNT)
1302				break;
1303			usec_delay(5);
1304		}
1305
1306		/* Release if grant not acquired */
1307		if (!(eec & IXGBE_EEC_GNT)) {
1308			eec &= ~IXGBE_EEC_REQ;
1309			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1310			DEBUGOUT("Could not acquire EEPROM grant\n");
1311
1312			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1313			status = IXGBE_ERR_EEPROM;
1314		}
1315
1316		/* Setup EEPROM for Read/Write */
1317		if (status == IXGBE_SUCCESS) {
1318			/* Clear CS and SK */
1319			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1320			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1321			IXGBE_WRITE_FLUSH(hw);
1322			usec_delay(1);
1323		}
1324	}
1325	return status;
1326}
1327
1328/**
1329 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1330 *  @hw: pointer to hardware structure
1331 *
1332 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1333 **/
1334static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1335{
1336	s32 status = IXGBE_ERR_EEPROM;
1337	u32 timeout = 2000;
1338	u32 i;
1339	u32 swsm;
1340
1341	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1342
1343
1344	/* Get SMBI software semaphore between device drivers first */
1345	for (i = 0; i < timeout; i++) {
1346		/*
1347		 * If the SMBI bit is 0 when we read it, then the bit will be
1348		 * set and we have the semaphore
1349		 */
1350		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1351		if (!(swsm & IXGBE_SWSM_SMBI)) {
1352			status = IXGBE_SUCCESS;
1353			break;
1354		}
1355		usec_delay(50);
1356	}
1357
1358	if (i == timeout) {
1359		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1360			 "not granted.\n");
1361		/*
1362		 * this release is particularly important because our attempts
1363		 * above to get the semaphore may have succeeded, and if there
1364		 * was a timeout, we should unconditionally clear the semaphore
1365		 * bits to free the driver to make progress
1366		 */
1367		ixgbe_release_eeprom_semaphore(hw);
1368
1369		usec_delay(50);
1370		/*
1371		 * one last try
1372		 * If the SMBI bit is 0 when we read it, then the bit will be
1373		 * set and we have the semaphore
1374		 */
1375		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1376		if (!(swsm & IXGBE_SWSM_SMBI))
1377			status = IXGBE_SUCCESS;
1378	}
1379
1380	/* Now get the semaphore between SW/FW through the SWESMBI bit */
1381	if (status == IXGBE_SUCCESS) {
1382		for (i = 0; i < timeout; i++) {
1383			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1384
1385			/* Set the SW EEPROM semaphore bit to request access */
1386			swsm |= IXGBE_SWSM_SWESMBI;
1387			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1388
1389			/*
1390			 * If we set the bit successfully then we got the
1391			 * semaphore.
1392			 */
1393			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1394			if (swsm & IXGBE_SWSM_SWESMBI)
1395				break;
1396
1397			usec_delay(50);
1398		}
1399
1400		/*
1401		 * Release semaphores and return error if SW EEPROM semaphore
1402		 * was not granted because we don't have access to the EEPROM
1403		 */
1404		if (i >= timeout) {
1405			DEBUGOUT("SWESMBI Software EEPROM semaphore "
1406				 "not granted.\n");
1407			ixgbe_release_eeprom_semaphore(hw);
1408			status = IXGBE_ERR_EEPROM;
1409		}
1410	} else {
1411		DEBUGOUT("Software semaphore SMBI between device drivers "
1412			 "not granted.\n");
1413	}
1414
1415	return status;
1416}
1417
1418/**
1419 *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1420 *  @hw: pointer to hardware structure
1421 *
1422 *  This function clears hardware semaphore bits.
1423 **/
1424static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1425{
1426	u32 swsm;
1427
1428	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1429
1430	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1431
1432	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1433	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1434	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1435	IXGBE_WRITE_FLUSH(hw);
1436}
1437
1438/**
1439 *  ixgbe_ready_eeprom - Polls for EEPROM ready
1440 *  @hw: pointer to hardware structure
1441 **/
1442static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1443{
1444	s32 status = IXGBE_SUCCESS;
1445	u16 i;
1446	u8 spi_stat_reg;
1447
1448	DEBUGFUNC("ixgbe_ready_eeprom");
1449
1450	/*
1451	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1452	 * EEPROM will signal that the command has been completed by clearing
1453	 * bit 0 of the internal status register.  If it's not cleared within
1454	 * 5 milliseconds, then error out.
1455	 */
1456	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1457		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1458					    IXGBE_EEPROM_OPCODE_BITS);
1459		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1460		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1461			break;
1462
1463		usec_delay(5);
1464		ixgbe_standby_eeprom(hw);
	}
1466
1467	/*
1468	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1469	 * devices (and only 0-5mSec on 5V devices)
1470	 */
1471	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1472		DEBUGOUT("SPI EEPROM Status error\n");
1473		status = IXGBE_ERR_EEPROM;
1474	}
1475
1476	return status;
1477}
1478
1479/**
1480 *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1481 *  @hw: pointer to hardware structure
1482 **/
1483static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1484{
1485	u32 eec;
1486
1487	DEBUGFUNC("ixgbe_standby_eeprom");
1488
1489	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1490
1491	/* Toggle CS to flush commands */
1492	eec |= IXGBE_EEC_CS;
1493	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1494	IXGBE_WRITE_FLUSH(hw);
1495	usec_delay(1);
1496	eec &= ~IXGBE_EEC_CS;
1497	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1498	IXGBE_WRITE_FLUSH(hw);
1499	usec_delay(1);
1500}
1501
1502/**
1503 *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1504 *  @hw: pointer to hardware structure
1505 *  @data: data to send to the EEPROM
1506 *  @count: number of bits to shift out
1507 **/
1508static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1509					u16 count)
1510{
1511	u32 eec;
1512	u32 mask;
1513	u32 i;
1514
1515	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1516
1517	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1518
1519	/*
1520	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1521	 * one bit at a time.  Determine the starting bit based on count
1522	 */
1523	mask = 0x01 << (count - 1);
1524
1525	for (i = 0; i < count; i++) {
1526		/*
1527		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1528		 * "1", and then raising and then lowering the clock (the SK
1529		 * bit controls the clock input to the EEPROM).  A "0" is
1530		 * shifted out to the EEPROM by setting "DI" to "0" and then
1531		 * raising and then lowering the clock.
1532		 */
1533		if (data & mask)
1534			eec |= IXGBE_EEC_DI;
1535		else
1536			eec &= ~IXGBE_EEC_DI;
1537
1538		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1539		IXGBE_WRITE_FLUSH(hw);
1540
1541		usec_delay(1);
1542
1543		ixgbe_raise_eeprom_clk(hw, &eec);
1544		ixgbe_lower_eeprom_clk(hw, &eec);
1545
1546		/*
1547		 * Shift mask to signify next bit of data to shift in to the
1548		 * EEPROM
1549		 */
1550		mask = mask >> 1;
	}
1552
1553	/* We leave the "DI" bit set to "0" when we leave this routine. */
1554	eec &= ~IXGBE_EEC_DI;
1555	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1556	IXGBE_WRITE_FLUSH(hw);
1557}
1558
1559/**
1560 *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in
1562 **/
1563static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1564{
1565	u32 eec;
1566	u32 i;
1567	u16 data = 0;
1568
1569	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1570
1571	/*
1572	 * In order to read a register from the EEPROM, we need to shift
1573	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1574	 * the clock input to the EEPROM (setting the SK bit), and then reading
1575	 * the value of the "DO" bit.  During this "shifting in" process the
1576	 * "DI" bit should always be clear.
1577	 */
1578	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1579
1580	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1581
1582	for (i = 0; i < count; i++) {
1583		data = data << 1;
1584		ixgbe_raise_eeprom_clk(hw, &eec);
1585
1586		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1587
1588		eec &= ~(IXGBE_EEC_DI);
1589		if (eec & IXGBE_EEC_DO)
1590			data |= 1;
1591
1592		ixgbe_lower_eeprom_clk(hw, &eec);
1593	}
1594
1595	return data;
1596}
1597
1598/**
1599 *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1600 *  @hw: pointer to hardware structure
1601 *  @eec: EEC register's current value
1602 **/
1603static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1604{
1605	DEBUGFUNC("ixgbe_raise_eeprom_clk");
1606
1607	/*
1608	 * Raise the clock input to the EEPROM
1609	 * (setting the SK bit), then delay
1610	 */
1611	*eec = *eec | IXGBE_EEC_SK;
1612	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1613	IXGBE_WRITE_FLUSH(hw);
1614	usec_delay(1);
1615}
1616
1617/**
1618 *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1619 *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value
1621 **/
1622static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1623{
1624	DEBUGFUNC("ixgbe_lower_eeprom_clk");
1625
1626	/*
1627	 * Lower the clock input to the EEPROM (clearing the SK bit), then
1628	 * delay
1629	 */
1630	*eec = *eec & ~IXGBE_EEC_SK;
1631	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1632	IXGBE_WRITE_FLUSH(hw);
1633	usec_delay(1);
1634}
1635
1636/**
1637 *  ixgbe_release_eeprom - Release EEPROM, release semaphores
1638 *  @hw: pointer to hardware structure
1639 **/
1640static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1641{
1642	u32 eec;
1643
1644	DEBUGFUNC("ixgbe_release_eeprom");
1645
1646	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1647
1648	eec |= IXGBE_EEC_CS;  /* Pull CS high */
1649	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1650
1651	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1652	IXGBE_WRITE_FLUSH(hw);
1653
1654	usec_delay(1);
1655
1656	/* Stop requesting EEPROM access */
1657	eec &= ~IXGBE_EEC_REQ;
1658	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1659
1660	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1661
1662	/* Delay before attempt to obtain semaphore again to allow FW access */
1663	msec_delay(hw->eeprom.semaphore_delay);
1664}
1665
1666/**
1667 *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1668 *  @hw: pointer to hardware structure
1669 **/
1670u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1671{
1672	u16 i;
1673	u16 j;
1674	u16 checksum = 0;
1675	u16 length = 0;
1676	u16 pointer = 0;
1677	u16 word = 0;
1678
1679	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1680
	/* Include everything up to, but not including, the checksum word */
1682	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1683		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
1684			DEBUGOUT("EEPROM read failed\n");
1685			break;
1686		}
1687		checksum += word;
1688	}
1689
1690	/* Include all data from pointers except for the fw pointer */
1691	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1692		hw->eeprom.ops.read(hw, i, &pointer);
1693
1694		/* Make sure the pointer seems valid */
1695		if (pointer != 0xFFFF && pointer != 0) {
1696			hw->eeprom.ops.read(hw, pointer, &length);
1697
1698			if (length != 0xFFFF && length != 0) {
1699				for (j = pointer+1; j <= pointer+length; j++) {
1700					hw->eeprom.ops.read(hw, j, &word);
1701					checksum += word;
1702				}
1703			}
1704		}
1705	}
1706
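	/*
	 * The checksum word is defined so that all the words summed above
	 * plus the checksum itself add up to IXGBE_EEPROM_SUM.
	 */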
1707	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1708
1709	return checksum;
1710}
1711
1712/**
1713 *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1714 *  @hw: pointer to hardware structure
1715 *  @checksum_val: calculated checksum
1716 *
1717 *  Performs checksum calculation and validates the EEPROM checksum.  If the
1718 *  caller does not need checksum_val, the value can be NULL.
1719 **/
1720s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1721					   u16 *checksum_val)
1722{
1723	s32 status;
1724	u16 checksum;
1725	u16 read_checksum = 0;
1726
1727	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1728
1729	/*
1730	 * Read the first word from the EEPROM. If this times out or fails, do
1731	 * not continue or we could be in for a very long wait while every
1732	 * EEPROM read fails
1733	 */
1734	status = hw->eeprom.ops.read(hw, 0, &checksum);
1735
1736	if (status == IXGBE_SUCCESS) {
1737		checksum = hw->eeprom.ops.calc_checksum(hw);
1738
1739		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1740
1741		/*
1742		 * Verify read checksum from EEPROM is the same as
1743		 * calculated checksum
1744		 */
1745		if (read_checksum != checksum)
1746			status = IXGBE_ERR_EEPROM_CHECKSUM;
1747
1748		/* If the user cares, return the calculated checksum */
1749		if (checksum_val)
1750			*checksum_val = checksum;
1751	} else {
1752		DEBUGOUT("EEPROM read failed\n");
1753	}
1754
1755	return status;
1756}
1757
1758/**
1759 *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1760 *  @hw: pointer to hardware structure
1761 **/
1762s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1763{
1764	s32 status;
1765	u16 checksum;
1766
1767	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1768
1769	/*
1770	 * Read the first word from the EEPROM. If this times out or fails, do
1771	 * not continue or we could be in for a very long wait while every
1772	 * EEPROM read fails
1773	 */
1774	status = hw->eeprom.ops.read(hw, 0, &checksum);
1775
1776	if (status == IXGBE_SUCCESS) {
1777		checksum = hw->eeprom.ops.calc_checksum(hw);
1778		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1779					      checksum);
1780	} else {
1781		DEBUGOUT("EEPROM read failed\n");
1782	}
1783
1784	return status;
1785}
1786
1787/**
1788 *  ixgbe_validate_mac_addr - Validate MAC address
1789 *  @mac_addr: pointer to MAC address.
1790 *
1791 *  Tests a MAC address to ensure it is a valid Individual Address
1792 **/
1793s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1794{
1795	s32 status = IXGBE_SUCCESS;
1796
1797	DEBUGFUNC("ixgbe_validate_mac_addr");
1798
1799	/* Make sure it is not a multicast address */
1800	if (IXGBE_IS_MULTICAST(mac_addr)) {
1801		DEBUGOUT("MAC address is multicast\n");
1802		status = IXGBE_ERR_INVALID_MAC_ADDR;
1803	/* Not a broadcast address */
1804	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
1805		DEBUGOUT("MAC address is broadcast\n");
1806		status = IXGBE_ERR_INVALID_MAC_ADDR;
1807	/* Reject the zero address */
1808	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1809		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1810		DEBUGOUT("MAC address is all zeros\n");
1811		status = IXGBE_ERR_INVALID_MAC_ADDR;
1812	}
1813	return status;
1814}
1815
1816/**
1817 *  ixgbe_set_rar_generic - Set Rx address register
1818 *  @hw: pointer to hardware structure
1819 *  @index: Receive address register to write
1820 *  @addr: Address to put into receive address register
1821 *  @vmdq: VMDq "set" or "pool" index
1822 *  @enable_addr: set flag that address is active
1823 *
1824 *  Puts an ethernet address into a receive address register.
1825 **/
1826s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1827			  u32 enable_addr)
1828{
1829	u32 rar_low, rar_high;
1830	u32 rar_entries = hw->mac.num_rar_entries;
1831
1832	DEBUGFUNC("ixgbe_set_rar_generic");
1833
1834	/* Make sure we are using a valid rar index range */
1835	if (index >= rar_entries) {
1836		DEBUGOUT1("RAR index %d is out of range.\n", index);
1837		return IXGBE_ERR_INVALID_ARGUMENT;
1838	}
1839
1840	/* setup VMDq pool selection before this RAR gets enabled */
1841	hw->mac.ops.set_vmdq(hw, index, vmdq);
1842
1843	/*
1844	 * HW expects these in little endian so we reverse the byte
1845	 * order from network order (big endian) to little endian
1846	 */
1847	rar_low = ((u32)addr[0] |
1848		   ((u32)addr[1] << 8) |
1849		   ((u32)addr[2] << 16) |
1850		   ((u32)addr[3] << 24));
1851	/*
1852	 * Some parts put the VMDq setting in the extra RAH bits,
1853	 * so save everything except the lower 16 bits that hold part
1854	 * of the address and the address valid bit.
1855	 */
1856	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1857	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1858	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1859
1860	if (enable_addr != 0)
1861		rar_high |= IXGBE_RAH_AV;
1862
1863	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1864	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1865
1866	return IXGBE_SUCCESS;
1867}
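
/*
 * Worked example (illustrative only): for the hypothetical address
 * 00:1B:21:AA:BB:CC programmed with enable_addr != 0, the code above yields
 *
 *	RAL(index) = 0xAA211B00
 *	RAH(index) = (preserved upper bits) | IXGBE_RAH_AV | 0x0000CCBB
 *
 * i.e. the first four octets land in RAL least-significant byte first, and
 * the last two octets land in the low 16 bits of RAH.
 */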
1868
1869/**
1870 *  ixgbe_clear_rar_generic - Remove Rx address register
1871 *  @hw: pointer to hardware structure
1872 *  @index: Receive address register to write
1873 *
1874 *  Clears an ethernet address from a receive address register.
1875 **/
1876s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1877{
1878	u32 rar_high;
1879	u32 rar_entries = hw->mac.num_rar_entries;
1880
1881	DEBUGFUNC("ixgbe_clear_rar_generic");
1882
1883	/* Make sure we are using a valid rar index range */
1884	if (index >= rar_entries) {
1885		DEBUGOUT1("RAR index %d is out of range.\n", index);
1886		return IXGBE_ERR_INVALID_ARGUMENT;
1887	}
1888
1889	/*
1890	 * Some parts put the VMDq setting in the extra RAH bits,
1891	 * so save everything except the lower 16 bits that hold part
1892	 * of the address and the address valid bit.
1893	 */
1894	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1895	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1896
1897	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1898	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1899
1900	/* clear VMDq pool/queue selection for this RAR */
1901	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1902
1903	return IXGBE_SUCCESS;
1904}
1905
1906/**
1907 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1908 *  @hw: pointer to hardware structure
1909 *
1910 *  Places the MAC address in receive address register 0 and clears the rest
1911 *  of the receive address registers. Clears the multicast table. Assumes
1912 *  the receiver is in reset when the routine is called.
1913 **/
1914s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1915{
1916	u32 i;
1917	u32 rar_entries = hw->mac.num_rar_entries;
1918
1919	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
1920
1921	/*
1922	 * If the current mac address is valid, assume it is a software override
1923	 * to the permanent address.
1924	 * Otherwise, use the permanent address from the eeprom.
1925	 */
1926	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
1927	    IXGBE_ERR_INVALID_MAC_ADDR) {
1928		/* Get the MAC address from the RAR0 for later reference */
1929		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1930
1931		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
1932			  hw->mac.addr[0], hw->mac.addr[1],
1933			  hw->mac.addr[2]);
1934		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1935			  hw->mac.addr[4], hw->mac.addr[5]);
1936	} else {
1937		/* Setup the receive address. */
1938		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
1939		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
1940			  hw->mac.addr[0], hw->mac.addr[1],
1941			  hw->mac.addr[2]);
1942		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1943			  hw->mac.addr[4], hw->mac.addr[5]);
1944
1945		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1946
1947		/* clear VMDq pool/queue selection for RAR 0 */
1948		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1949	}
1950	hw->addr_ctrl.overflow_promisc = 0;
1951
1952	hw->addr_ctrl.rar_used_count = 1;
1953
1954	/* Zero out the other receive addresses. */
1955	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
1956	for (i = 1; i < rar_entries; i++) {
1957		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1958		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1959	}
1960
1961	/* Clear the MTA */
1962	hw->addr_ctrl.mta_in_use = 0;
1963	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1964
1965	DEBUGOUT(" Clearing MTA\n");
1966	for (i = 0; i < hw->mac.mcft_size; i++)
1967		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1968
1969	ixgbe_init_uta_tables(hw);
1970
1971	return IXGBE_SUCCESS;
1972}
1973
1974/**
1975 *  ixgbe_add_uc_addr - Adds a secondary unicast address.
1976 *  @hw: pointer to hardware structure
1977 *  @addr: new address
1977 *  @vmdq: VMDq pool to associate with the address
1978 *
1979 *  Adds the address to an unused receive address register, or puts the
1979 *  controller into promiscuous mode when no register is free.
1980 **/
1981void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1982{
1983	u32 rar_entries = hw->mac.num_rar_entries;
1984	u32 rar;
1985
1986	DEBUGFUNC("ixgbe_add_uc_addr");
1987
1988	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1989		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1990
1991	/*
1992	 * Place this address in the RAR if there is room,
1993	 * else put the controller into promiscuous mode
1994	 */
1995	if (hw->addr_ctrl.rar_used_count < rar_entries) {
1996		rar = hw->addr_ctrl.rar_used_count;
1997		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1998		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
1999		hw->addr_ctrl.rar_used_count++;
2000	} else {
2001		hw->addr_ctrl.overflow_promisc++;
2002	}
2003
2004	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2005}
2006
2007/**
2008 *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2009 *  @hw: pointer to hardware structure
2010 *  @addr_list: the list of new addresses
2011 *  @addr_count: number of addresses
2012 *  @next: iterator function to walk the address list
2013 *
2014 *  The given list replaces any existing list.  Clears the secondary addrs from
2015 *  receive address registers.  Uses unused receive address registers for the
2016 *  first secondary addresses, and falls back to promiscuous mode as needed.
2017 *
2018 *  Drivers using secondary unicast addresses must set user_set_promisc when
2019 *  manually putting the device into promiscuous mode.
2020 **/
2021s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2022				      u32 addr_count, ixgbe_mc_addr_itr next)
2023{
2024	u8 *addr;
2025	u32 i;
2026	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2027	u32 uc_addr_in_use;
2028	u32 fctrl;
2029	u32 vmdq;
2030
2031	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2032
2033	/*
2034	 * Clear accounting of old secondary address list,
2035	 * don't count RAR[0]
2036	 */
2037	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2038	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2039	hw->addr_ctrl.overflow_promisc = 0;
2040
2041	/* Zero out the other receive addresses */
2042	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2043	for (i = 0; i < uc_addr_in_use; i++) {
2044		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2045		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2046	}
2047
2048	/* Add the new addresses */
2049	for (i = 0; i < addr_count; i++) {
2050		DEBUGOUT(" Adding the secondary addresses:\n");
2051		addr = next(hw, &addr_list, &vmdq);
2052		ixgbe_add_uc_addr(hw, addr, vmdq);
2053	}
2054
2055	if (hw->addr_ctrl.overflow_promisc) {
2056		/* enable promisc if not already in overflow or set by user */
2057		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2058			DEBUGOUT(" Entering address overflow promisc mode\n");
2059			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2060			fctrl |= IXGBE_FCTRL_UPE;
2061			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2062		}
2063	} else {
2064		/* only disable if set by overflow, not by user */
2065		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2066			DEBUGOUT(" Leaving address overflow promisc mode\n");
2067			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2068			fctrl &= ~IXGBE_FCTRL_UPE;
2069			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2070		}
2071	}
2072
2073	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2074	return IXGBE_SUCCESS;
2075}
2076
2077/**
2078 *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2079 *  @hw: pointer to hardware structure
2080 *  @mc_addr: the multicast address
2081 *
2082 *  Extracts 12 bits from a multicast address to determine which bit-vector
2083 *  to set in the multicast table. The hardware uses the same 12 bits of
2084 *  incoming rx multicast addresses to determine the bit-vector to check in
2085 *  the MTA. Which of the four possible 12-bit combinations the hardware uses
2086 *  is selected by the MO field of MCSTCTRL. The MO field is set during
2087 *  initialization to mc_filter_type.
2088 **/
2089static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2090{
2091	u32 vector = 0;
2092
2093	DEBUGFUNC("ixgbe_mta_vector");
2094
2095	switch (hw->mac.mc_filter_type) {
2096	case 0:   /* use bits [47:36] of the address */
2097		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2098		break;
2099	case 1:   /* use bits [46:35] of the address */
2100		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2101		break;
2102	case 2:   /* use bits [45:34] of the address */
2103		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2104		break;
2105	case 3:   /* use bits [43:32] of the address */
2106		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2107		break;
2108	default:  /* Invalid mc_filter_type */
2109		DEBUGOUT("MC filter type param set incorrectly\n");
2110		ASSERT(0);
2111		break;
2112	}
2113
2114	/* vector can only be 12-bits or boundary will be exceeded */
2115	vector &= 0xFFF;
2116	return vector;
2117}
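
/*
 * Worked example (illustrative only): for the multicast address
 * 01:00:5E:00:00:01, mc_addr[4] = 0x00 and mc_addr[5] = 0x01, so
 *
 *	mc_filter_type 0: vector = (0x00 >> 4) | (0x01 << 4) = 0x010
 *	mc_filter_type 3: vector =  0x00       | (0x01 << 8) = 0x100
 *
 * Only the low 12 bits are kept, which indexes one of the 4096 MTA bits.
 */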
2118
2119/**
2120 *  ixgbe_set_mta - Set bit-vector in multicast table
2121 *  @hw: pointer to hardware structure
2122 *  @mc_addr: multicast address to set in the table
2123 *
2124 *  Sets the bit-vector in the multicast table.
2125 **/
2126void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2127{
2128	u32 vector;
2129	u32 vector_bit;
2130	u32 vector_reg;
2131
2132	DEBUGFUNC("ixgbe_set_mta");
2133
2134	hw->addr_ctrl.mta_in_use++;
2135
2136	vector = ixgbe_mta_vector(hw, mc_addr);
2137	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2138
2139	/*
2140	 * The MTA is a register array of 128 32-bit registers. It is treated
2141	 * like an array of 4096 bits.  We want to set bit
2142	 * BitArray[vector_value]. So we figure out which register the bit is
2143	 * in and OR the new bit into the shadow copy of that register; the
2144	 * shadow is written to the hardware later.  The register is determined
2145	 * by the upper 7 bits of the vector value and the bit within that
2146	 * register is determined by the lower 5 bits of the value.
2147	 */
2148	vector_reg = (vector >> 5) & 0x7F;
2149	vector_bit = vector & 0x1F;
2150	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2151}
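
/*
 * Continuing the example above (illustrative only): a vector of 0x010 gives
 * vector_reg = (0x010 >> 5) & 0x7F = 0 and vector_bit = 0x010 & 0x1F = 16,
 * so bit 16 of mta_shadow[0] is set.  The shadow array is flushed to the
 * MTA registers by ixgbe_update_mc_addr_list_generic() below.
 */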
2152
2153/**
2154 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2155 *  @hw: pointer to hardware structure
2156 *  @mc_addr_list: the list of new multicast addresses
2157 *  @mc_addr_count: number of addresses
2158 *  @next: iterator function to walk the multicast address list
2159 *  @clear: flag, when set clears the table beforehand
2160 *
2161 *  When the clear flag is set, the given list replaces any existing list.
2162 *  Hashes the given addresses into the multicast table.
2163 **/
2164s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2165				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2166				      bool clear)
2167{
2168	u32 i;
2169	u32 vmdq;
2170
2171	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2172
2173	/*
2174	 * Set the new number of MC addresses that we are being requested to
2175	 * use.
2176	 */
2177	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2178	hw->addr_ctrl.mta_in_use = 0;
2179
2180	/* Clear mta_shadow */
2181	if (clear) {
2182		DEBUGOUT(" Clearing MTA\n");
2183		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2184	}
2185
2186	/* Update mta_shadow */
2187	for (i = 0; i < mc_addr_count; i++) {
2188		DEBUGOUT(" Adding the multicast addresses:\n");
2189		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2190	}
2191
2192	/* Enable mta */
2193	for (i = 0; i < hw->mac.mcft_size; i++)
2194		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2195				      hw->mac.mta_shadow[i]);
2196
2197	if (hw->addr_ctrl.mta_in_use > 0)
2198		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2199				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2200
2201	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2202	return IXGBE_SUCCESS;
2203}
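
/*
 * Usage sketch (illustrative, not part of the driver): a caller supplies an
 * ixgbe_mc_addr_itr callback that hands back one address per invocation and
 * advances the list pointer.  A minimal iterator over a packed array of
 * 6-byte addresses might look like this (names are hypothetical):
 *
 *	static u8 *example_mc_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
 *				  u32 *vmdq)
 *	{
 *		u8 *addr = *mc_addr_ptr;
 *
 *		*vmdq = 0;                /* no VMDq pool in this sketch */
 *		*mc_addr_ptr += 6;        /* advance to the next address */
 *		return addr;
 *	}
 *
 *	ixgbe_update_mc_addr_list_generic(hw, mc_list, count,
 *					  example_mc_itr, TRUE);
 */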
2204
2205/**
2206 *  ixgbe_enable_mc_generic - Enable multicast filtering
2207 *  @hw: pointer to hardware structure
2208 *
2209 *  Enables the use of the multicast hash table when multicast addresses are in use.
2210 **/
2211s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2212{
2213	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2214
2215	DEBUGFUNC("ixgbe_enable_mc_generic");
2216
2217	if (a->mta_in_use > 0)
2218		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2219				hw->mac.mc_filter_type);
2220
2221	return IXGBE_SUCCESS;
2222}
2223
2224/**
2225 *  ixgbe_disable_mc_generic - Disable multicast filtering
2226 *  @hw: pointer to hardware structure
2227 *
2228 *  Disables the use of the multicast hash table.
2229 **/
2230s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2231{
2232	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2233
2234	DEBUGFUNC("ixgbe_disable_mc_generic");
2235
2236	if (a->mta_in_use > 0)
2237		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2238
2239	return IXGBE_SUCCESS;
2240}
2241
2242/**
2243 *  ixgbe_fc_enable_generic - Enable flow control
2244 *  @hw: pointer to hardware structure
2245 *  @packetbuf_num: packet buffer number (0-7)
2246 *
2247 *  Enable flow control according to the current settings.
2248 **/
2249s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
2250{
2251	s32 ret_val = IXGBE_SUCCESS;
2252	u32 mflcn_reg, fccfg_reg;
2253	u32 reg;
2254	u32 fcrtl, fcrth;
2255
2256	DEBUGFUNC("ixgbe_fc_enable_generic");
2257
2258	/* Negotiate the fc mode to use */
2259	ret_val = ixgbe_fc_autoneg(hw);
2260	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
2261		goto out;
2262
2263	/* Disable any previous flow control settings */
2264	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2265	mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
2266
2267	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2268	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2269
2270	/*
2271	 * The possible values of fc.current_mode are:
2272	 * 0: Flow control is completely disabled
2273	 * 1: Rx flow control is enabled (we can receive pause frames,
2274	 *    but not send pause frames).
2275	 * 2: Tx flow control is enabled (we can send pause frames but
2276	 *    we do not support receiving pause frames).
2277	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2278	 * other: Invalid.
2279	 */
2280	switch (hw->fc.current_mode) {
2281	case ixgbe_fc_none:
2282		/*
2283		 * Flow control is disabled by software override or autoneg.
2284		 * The code below will actually disable it in the HW.
2285		 */
2286		break;
2287	case ixgbe_fc_rx_pause:
2288		/*
2289		 * Rx Flow control is enabled and Tx Flow control is
2290		 * disabled by software override. Since there really
2291		 * isn't a way to advertise that we are capable of RX
2292		 * Pause ONLY, we will advertise that we support both
2293		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2294		 * disable the adapter's ability to send PAUSE frames.
2295		 */
2296		mflcn_reg |= IXGBE_MFLCN_RFCE;
2297		break;
2298	case ixgbe_fc_tx_pause:
2299		/*
2300		 * Tx Flow control is enabled, and Rx Flow control is
2301		 * disabled by software override.
2302		 */
2303		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2304		break;
2305	case ixgbe_fc_full:
2306		/* Flow control (both Rx and Tx) is enabled by SW override. */
2307		mflcn_reg |= IXGBE_MFLCN_RFCE;
2308		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2309		break;
2310	default:
2311		DEBUGOUT("Flow control param set incorrectly\n");
2312		ret_val = IXGBE_ERR_CONFIG;
2313		goto out;
2314		break;
2315	}
2316
2317	/* Set 802.3x based flow control settings. */
2318	mflcn_reg |= IXGBE_MFLCN_DPF;
2319	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2320	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2321
2322	fcrth = hw->fc.high_water[packetbuf_num] << 10;
2323	fcrtl = hw->fc.low_water << 10;
2324
2325	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
2326		fcrth |= IXGBE_FCRTH_FCEN;
2327		if (hw->fc.send_xon)
2328			fcrtl |= IXGBE_FCRTL_XONE;
2329	}
2330
2331	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
2332	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
2333
2334	/* Configure pause time (2 TCs per register) */
2335	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
2336	if ((packetbuf_num & 1) == 0)
2337		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
2338	else
2339		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
2340	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
2341
2342	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
2343
2344out:
2345	return ret_val;
2346}
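
/*
 * Worked example (illustrative only): each FCTTV register holds the pause
 * time for two packet buffers/TCs.  For packetbuf_num = 3 the code above
 * updates FCTTV(1); because 3 is odd, pause_time is placed in the upper 16
 * bits and the lower 16 bits (packet buffer 2) are preserved.  The refresh
 * value written to FCRTV is half of pause_time.
 */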
2347
2348/**
2349 *  ixgbe_fc_autoneg - Configure flow control
2350 *  @hw: pointer to hardware structure
2351 *
2352 *  Compares our advertised flow control capabilities to those advertised by
2353 *  our link partner, and determines the proper flow control mode to use.
2354 **/
2355s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2356{
2357	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2358	ixgbe_link_speed speed;
2359	bool link_up;
2360
2361	DEBUGFUNC("ixgbe_fc_autoneg");
2362
2363	if (hw->fc.disable_fc_autoneg)
2364		goto out;
2365
2366	/*
2367	 * AN should have completed when the cable was plugged in.
2368	 * Look for reasons to bail out.  Bail out if:
2369	 * - FC autoneg is disabled, or if
2370	 * - link is not up.
2371	 *
2372	 * Since we're being called from an LSC, link is already known to be up.
2373	 * So use link_up_wait_to_complete=FALSE.
2374	 */
2375	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2376	if (!link_up) {
2377		ret_val = IXGBE_ERR_FLOW_CONTROL;
2378		goto out;
2379	}
2380
2381	switch (hw->phy.media_type) {
2382	/* Autoneg flow control on fiber adapters */
2383	case ixgbe_media_type_fiber:
2384		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2385			ret_val = ixgbe_fc_autoneg_fiber(hw);
2386		break;
2387
2388	/* Autoneg flow control on backplane adapters */
2389	case ixgbe_media_type_backplane:
2390		ret_val = ixgbe_fc_autoneg_backplane(hw);
2391		break;
2392
2393	/* Autoneg flow control on copper adapters */
2394	case ixgbe_media_type_copper:
2395		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2396			ret_val = ixgbe_fc_autoneg_copper(hw);
2397		break;
2398
2399	default:
2400		break;
2401	}
2402
2403out:
2404	if (ret_val == IXGBE_SUCCESS) {
2405		hw->fc.fc_was_autonegged = TRUE;
2406	} else {
2407		hw->fc.fc_was_autonegged = FALSE;
2408		hw->fc.current_mode = hw->fc.requested_mode;
2409	}
2410	return ret_val;
2411}
2412
2413/**
2414 *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2415 *  @hw: pointer to hardware structure
2416 *
2417 *  Enable flow control on 1 gig fiber.
2418 **/
2419static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2420{
2421	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2422	s32 ret_val;
2423
2424	/*
2425	 * On multispeed fiber at 1g, bail out if
2426	 * - link is up but AN did not complete, or if
2427	 * - link is up and AN completed but timed out
2428	 */
2429
2430	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2431	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2432	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2433		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2434		goto out;
2435	}
2436
2437	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2438	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2439
2440	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2441				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2442				      IXGBE_PCS1GANA_ASM_PAUSE,
2443				      IXGBE_PCS1GANA_SYM_PAUSE,
2444				      IXGBE_PCS1GANA_ASM_PAUSE);
2445
2446out:
2447	return ret_val;
2448}
2449
2450/**
2451 *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2452 *  @hw: pointer to hardware structure
2453 *
2454 *  Enable flow control according to IEEE clause 37.
2455 **/
2456static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2457{
2458	u32 links2, anlp1_reg, autoc_reg, links;
2459	s32 ret_val;
2460
2461	/*
2462	 * On backplane, bail out if
2463	 * - backplane autoneg was not completed, or if
2464	 * - we are 82599 and link partner is not AN enabled
2465	 */
2466	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2467	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2468		hw->fc.fc_was_autonegged = FALSE;
2469		hw->fc.current_mode = hw->fc.requested_mode;
2470		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2471		goto out;
2472	}
2473
2474	if (hw->mac.type == ixgbe_mac_82599EB) {
2475		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2476		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2477			hw->fc.fc_was_autonegged = FALSE;
2478			hw->fc.current_mode = hw->fc.requested_mode;
2479			ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2480			goto out;
2481		}
2482	}
2483	/*
2484	 * Read the 10g AN autoc and LP ability registers and resolve
2485	 * local flow control settings accordingly
2486	 */
2487	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2488	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2489
2490	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2491		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2492		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2493
2494out:
2495	return ret_val;
2496}
2497
2498/**
2499 *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2500 *  @hw: pointer to hardware structure
2501 *
2502 *  Enable flow control according to IEEE clause 37.
2503 **/
2504static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2505{
2506	u16 technology_ability_reg = 0;
2507	u16 lp_technology_ability_reg = 0;
2508
2509	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2510			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2511			     &technology_ability_reg);
2512	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2513			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2514			     &lp_technology_ability_reg);
2515
2516	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2517				  (u32)lp_technology_ability_reg,
2518				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2519				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2520}
2521
2522/**
2523 *  ixgbe_negotiate_fc - Negotiate flow control
2524 *  @hw: pointer to hardware structure
2525 *  @adv_reg: flow control advertised settings
2526 *  @lp_reg: link partner's flow control settings
2527 *  @adv_sym: symmetric pause bit in advertisement
2528 *  @adv_asm: asymmetric pause bit in advertisement
2529 *  @lp_sym: symmetric pause bit in link partner advertisement
2530 *  @lp_asm: asymmetric pause bit in link partner advertisement
2531 *
2532 *  Find the intersection between advertised settings and link partner's
2533 *  advertised settings
2534 **/
2535static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2536			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2537{
2538	if (!adv_reg || !lp_reg)
2539		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2540
2541	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2542		/*
2543		 * Now we need to check if the user selected Rx ONLY
2544		 * pause frames.  In that case, we had to advertise
2545		 * FULL flow control because we could not advertise Rx
2546		 * ONLY. Hence, we must now check whether we need to
2547		 * turn OFF the TRANSMISSION of PAUSE frames.
2548		 */
2549		if (hw->fc.requested_mode == ixgbe_fc_full) {
2550			hw->fc.current_mode = ixgbe_fc_full;
2551			DEBUGOUT("Flow Control = FULL.\n");
2552		} else {
2553			hw->fc.current_mode = ixgbe_fc_rx_pause;
2554			DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2555		}
2556	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2557		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2558		hw->fc.current_mode = ixgbe_fc_tx_pause;
2559		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2560	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2561		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2562		hw->fc.current_mode = ixgbe_fc_rx_pause;
2563		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2564	} else {
2565		hw->fc.current_mode = ixgbe_fc_none;
2566		DEBUGOUT("Flow Control = NONE.\n");
2567	}
2568	return IXGBE_SUCCESS;
2569}
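
/*
 * Summary of the resolution above (informational): with SYM = symmetric
 * PAUSE bit and ASM = asymmetric PAUSE bit,
 *
 *	local SYM & partner SYM            -> full (or rx_pause if the user
 *	                                      did not request full)
 *	local ASM only, partner SYM + ASM  -> tx_pause
 *	local SYM + ASM, partner ASM only  -> rx_pause
 *	anything else                      -> none
 */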
2570
2571/**
2572 *  ixgbe_setup_fc - Set up flow control
2573 *  @hw: pointer to hardware structure
2573 *  @packetbuf_num: packet buffer number (0-7)
2574 *
2575 *  Called at init time to set up flow control.
2576 **/
2577static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2578{
2579	s32 ret_val = IXGBE_SUCCESS;
2580	u32 reg = 0, reg_bp = 0;
2581	u16 reg_cu = 0;
2582
2583	DEBUGFUNC("ixgbe_setup_fc");
2584
2585	/* Validate the packetbuf configuration */
2586	if (packetbuf_num < 0 || packetbuf_num > 7) {
2587		DEBUGOUT1("Invalid packet buffer number [%d], expected range "
2588			  "is 0-7\n", packetbuf_num);
2589		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2590		goto out;
2591	}
2592
2593	/*
2594	 * Validate the water mark configuration.  Zero water marks are invalid
2595	 * because they cause the controller to just blast out fc packets.
2596	 */
2597	if (!hw->fc.low_water ||
2598	    !hw->fc.high_water[packetbuf_num] ||
2599	    !hw->fc.pause_time) {
2600		DEBUGOUT("Invalid water mark configuration\n");
2601		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2602		goto out;
2603	}
2604
2605	/*
2606	 * Validate the requested mode.  Strict IEEE mode does not allow
2607	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
2608	 */
2609	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2610		DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
2611		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2612		goto out;
2613	}
2614
2615	/*
2616	 * 10gig parts do not have a word in the EEPROM to determine the
2617	 * default flow control setting, so we explicitly set it to full.
2618	 */
2619	if (hw->fc.requested_mode == ixgbe_fc_default)
2620		hw->fc.requested_mode = ixgbe_fc_full;
2621
2622	/*
2623	 * Set up the 1G and 10G flow control advertisement registers so the
2624	 * HW will be able to do fc autoneg once the cable is plugged in.  If
2625	 * we link at 10G, the 1G advertisement is harmless and vice versa.
2626	 */
2627
2628	switch (hw->phy.media_type) {
2629	case ixgbe_media_type_fiber:
2630	case ixgbe_media_type_backplane:
2631		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2632		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2633		break;
2634
2635	case ixgbe_media_type_copper:
2636		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2637				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
2638		break;
2639
2640	default:
2641		;
2642	}
2643
2644	/*
2645	 * The possible values of fc.requested_mode are:
2646	 * 0: Flow control is completely disabled
2647	 * 1: Rx flow control is enabled (we can receive pause frames,
2648	 *    but not send pause frames).
2649	 * 2: Tx flow control is enabled (we can send pause frames but
2650	 *    we do not support receiving pause frames).
2651	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2652	 * other: Invalid.
2653	 */
2654	switch (hw->fc.requested_mode) {
2655	case ixgbe_fc_none:
2656		/* Flow control completely disabled by software override. */
2657		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2658		if (hw->phy.media_type == ixgbe_media_type_backplane)
2659			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
2660				    IXGBE_AUTOC_ASM_PAUSE);
2661		else if (hw->phy.media_type == ixgbe_media_type_copper)
2662			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2663		break;
2664	case ixgbe_fc_rx_pause:
2665		/*
2666		 * Rx Flow control is enabled and Tx Flow control is
2667		 * disabled by software override. Since there really
2668		 * isn't a way to advertise that we are capable of RX
2669		 * Pause ONLY, we will advertise that we support both
2670		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2671		 * disable the adapter's ability to send PAUSE frames.
2672		 */
2673		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2674		if (hw->phy.media_type == ixgbe_media_type_backplane)
2675			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2676				   IXGBE_AUTOC_ASM_PAUSE);
2677		else if (hw->phy.media_type == ixgbe_media_type_copper)
2678			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2679		break;
2680	case ixgbe_fc_tx_pause:
2681		/*
2682		 * Tx Flow control is enabled, and Rx Flow control is
2683		 * disabled by software override.
2684		 */
2685		reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2686		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2687		if (hw->phy.media_type == ixgbe_media_type_backplane) {
2688			reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2689			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2690		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
2691			reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2692			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2693		}
2694		break;
2695	case ixgbe_fc_full:
2696		/* Flow control (both Rx and Tx) is enabled by SW override. */
2697		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2698		if (hw->phy.media_type == ixgbe_media_type_backplane)
2699			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2700				   IXGBE_AUTOC_ASM_PAUSE);
2701		else if (hw->phy.media_type == ixgbe_media_type_copper)
2702			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2703		break;
2704	default:
2705		DEBUGOUT("Flow control param set incorrectly\n");
2706		ret_val = IXGBE_ERR_CONFIG;
2707		goto out;
2708		break;
2709	}
2710
2711	if (hw->mac.type != ixgbe_mac_X540) {
2712		/*
2713		 * Enable auto-negotiation between the MAC & PHY;
2714		 * the MAC will advertise clause 37 flow control.
2715		 */
2716		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2717		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2718
2719		/* Disable AN timeout */
2720		if (hw->fc.strict_ieee)
2721			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2722
2723		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2724		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2725	}
2726
2727	/*
2728	 * AUTOC restart handles negotiation of 1G and 10G on backplane
2729	 * and copper. There is no need to set the PCS1GCTL register.
2731	 */
2732	if (hw->phy.media_type == ixgbe_media_type_backplane) {
2733		reg_bp |= IXGBE_AUTOC_AN_RESTART;
2734		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2735	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2736		    (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
2737		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2738				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
2739	}
2740
2741	DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2742out:
2743	return ret_val;
2744}
2745
2746/**
2747 *  ixgbe_disable_pcie_master - Disable PCI-express master access
2748 *  @hw: pointer to hardware structure
2749 *
2750 *  Disables PCI-Express master access and verifies there are no pending
2751 *  requests. Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master
2752 *  disable bit has not stopped outstanding master requests, or
2753 *  IXGBE_SUCCESS once master requests are disabled.
2754 **/
2755s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2756{
2757	s32 status = IXGBE_SUCCESS;
2758	u32 i;
2759
2760	DEBUGFUNC("ixgbe_disable_pcie_master");
2761
2762	/* Always set this bit to ensure any future transactions are blocked */
2763	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2764
2765	/* Exit if master requests are blocked */
2766	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2767		goto out;
2768
2769	/* Poll for master request bit to clear */
2770	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2771		usec_delay(100);
2772		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2773			goto out;
2774	}
2775
2776	/*
2777	 * Two consecutive resets are required via CTRL.RST per datasheet
2778	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
2779	 * of this need.  The first reset prevents new master requests from
2780	 * being issued by our device.  We then must wait 1usec or more for any
2781	 * remaining completions from the PCIe bus to trickle in, and then reset
2782	 * again to clear out any effects they may have had on our device.
2783	 */
2784	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2785	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2786
2787	/*
2788	 * Before proceeding, make sure that the PCIe block does not have
2789	 * transactions pending.
2790	 */
2791	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2792		usec_delay(100);
2793		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2794		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2795			goto out;
2796	}
2797
2798	DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
2799	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2800
2801out:
2802	return status;
2803}
2804
2805/**
2806 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2807 *  @hw: pointer to hardware structure
2808 *  @mask: Mask to specify which semaphore to acquire
2809 *
2810 *  Acquires the SWFW semaphore through the GSSR register for the specified
2811 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2812 **/
2813s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2814{
2815	u32 gssr;
2816	u32 swmask = mask;
2817	u32 fwmask = mask << 5;
2818	s32 timeout = 200;
2819
2820	DEBUGFUNC("ixgbe_acquire_swfw_sync");
2821
2822	while (timeout) {
2823		/*
2824		 * SW EEPROM semaphore bit is used for access to all
2825		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2826		 */
2827		if (ixgbe_get_eeprom_semaphore(hw))
2828			return IXGBE_ERR_SWFW_SYNC;
2829
2830		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2831		if (!(gssr & (fwmask | swmask)))
2832			break;
2833
2834		/*
2835		 * Firmware currently using resource (fwmask) or other software
2836		 * thread currently using resource (swmask)
2837		 */
2838		ixgbe_release_eeprom_semaphore(hw);
2839		msec_delay(5);
2840		timeout--;
2841	}
2842
2843	if (!timeout) {
2844		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
2845		return IXGBE_ERR_SWFW_SYNC;
2846	}
2847
2848	gssr |= swmask;
2849	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2850
2851	ixgbe_release_eeprom_semaphore(hw);
2852	return IXGBE_SUCCESS;
2853}
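
/*
 * Usage sketch (illustrative, not part of the driver): callers bracket
 * accesses to a shared resource with acquire/release using one of the GSSR
 * masks, for example IXGBE_GSSR_PHY0_SM for PHY 0 registers:
 *
 *	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM) !=
 *	    IXGBE_SUCCESS)
 *		return IXGBE_ERR_SWFW_SYNC;
 *	... access the shared resource ...
 *	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
 */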
2854
2855/**
2856 *  ixgbe_release_swfw_sync - Release SWFW semaphore
2857 *  @hw: pointer to hardware structure
2858 *  @mask: Mask to specify which semaphore to release
2859 *
2860 *  Releases the SWFW semaphore through the GSSR register for the specified
2861 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2862 **/
2863void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2864{
2865	u32 gssr;
2866	u32 swmask = mask;
2867
2868	DEBUGFUNC("ixgbe_release_swfw_sync");
2869
2870	ixgbe_get_eeprom_semaphore(hw);
2871
2872	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2873	gssr &= ~swmask;
2874	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2875
2876	ixgbe_release_eeprom_semaphore(hw);
2877}
2878
2879/**
2880 *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2881 *  @hw: pointer to hardware structure
2882 *
2883 *  Stops the receive data path and waits for the HW to internally empty
2884 *  the Rx security block
2885 **/
2886s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2887{
2888#define IXGBE_MAX_SECRX_POLL 40
2889
2890	int i;
2891	int secrxreg;
2892
2893	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2894
2895
2896	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2897	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2898	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2899	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2900		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2901		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2902			break;
2903		else
2904			/* Use interrupt-safe sleep just in case */
2905			usec_delay(1000);
2906	}
2907
2908	/* For informational purposes only */
2909	if (i >= IXGBE_MAX_SECRX_POLL)
2910		DEBUGOUT("Rx unit being enabled before security "
2911			 "path fully disabled.  Continuing with init.\n");
2912
2913	return IXGBE_SUCCESS;
2914}
2915
2916/**
2917 *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2918 *  @hw: pointer to hardware structure
2919 *
2920 *  Enables the receive data path.
2921 **/
2922s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2923{
2924	int secrxreg;
2925
2926	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2927
2928	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2929	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2930	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2931	IXGBE_WRITE_FLUSH(hw);
2932
2933	return IXGBE_SUCCESS;
2934}
2935
2936/**
2937 *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2938 *  @hw: pointer to hardware structure
2939 *  @regval: register value to write to RXCTRL
2940 *
2941 *  Enables the Rx DMA unit
2942 **/
2943s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2944{
2945	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2946
2947	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2948
2949	return IXGBE_SUCCESS;
2950}
2951
2952/**
2953 *  ixgbe_blink_led_start_generic - Blink LED based on index.
2954 *  @hw: pointer to hardware structure
2955 *  @index: led number to blink
2956 **/
2957s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2958{
2959	ixgbe_link_speed speed = 0;
2960	bool link_up = 0;
2961	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2962	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2963
2964	DEBUGFUNC("ixgbe_blink_led_start_generic");
2965
2966	/*
2967	 * Link must be up to auto-blink the LEDs;
2968	 * Force it if link is down.
2969	 */
2970	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2971
2972	if (!link_up) {
2973		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2974		autoc_reg |= IXGBE_AUTOC_FLU;
2975		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2976		IXGBE_WRITE_FLUSH(hw);
2977		msec_delay(10);
2978	}
2979
2980	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2981	led_reg |= IXGBE_LED_BLINK(index);
2982	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2983	IXGBE_WRITE_FLUSH(hw);
2984
2985	return IXGBE_SUCCESS;
2986}
2987
2988/**
2989 *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2990 *  @hw: pointer to hardware structure
2991 *  @index: led number to stop blinking
2992 **/
2993s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2994{
2995	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2996	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2997
2998	DEBUGFUNC("ixgbe_blink_led_stop_generic");
2999
3000
3001	autoc_reg &= ~IXGBE_AUTOC_FLU;
3002	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3003	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3004
3005	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3006	led_reg &= ~IXGBE_LED_BLINK(index);
3007	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3008	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3009	IXGBE_WRITE_FLUSH(hw);
3010
3011	return IXGBE_SUCCESS;
3012}
3013
3014/**
3015 *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3016 *  @hw: pointer to hardware structure
3017 *  @san_mac_offset: SAN MAC address offset
3018 *
3019 *  This function will read the EEPROM location for the SAN MAC address
3020 *  pointer and stores the value found there in *san_mac_offset.  This is
3021 *  used in both the get and set mac_addr routines.
3022 **/
3023static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3024					 u16 *san_mac_offset)
3025{
3026	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3027
3028	/*
3029	 * First read the EEPROM pointer to see if the MAC addresses are
3030	 * available.
3031	 */
3032	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
3033
3034	return IXGBE_SUCCESS;
3035}
3036
3037/**
3038 *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3039 *  @hw: pointer to hardware structure
3040 *  @san_mac_addr: SAN MAC address
3041 *
3042 *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
3043 *  per-port, so set_lan_id() must be called before reading the addresses.
3044 *  set_lan_id() is called by identify_sfp(), but this cannot be relied
3045 *  upon for non-SFP connections, so we must call it here.
3046 **/
3047s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3048{
3049	u16 san_mac_data, san_mac_offset;
3050	u8 i;
3051
3052	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3053
3054	/*
3055	 * First read the EEPROM pointer to see if the MAC addresses are
3056	 * available.  If they're not, no point in calling set_lan_id() here.
3057	 */
3058	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3059
3060	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3061		/*
3062		 * No addresses available in this EEPROM.  It's not an
3063		 * error though, so just wipe the local address and return.
3064		 */
3065		for (i = 0; i < 6; i++)
3066			san_mac_addr[i] = 0xFF;
3067
3068		goto san_mac_addr_out;
3069	}
3070
3071	/* make sure we know which port we need to program */
3072	hw->mac.ops.set_lan_id(hw);
3073	/* apply the port offset to the address offset */
3074	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3075			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3076	for (i = 0; i < 3; i++) {
3077		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3078		san_mac_addr[i * 2] = (u8)(san_mac_data);
3079		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3080		san_mac_offset++;
3081	}
3082
3083san_mac_addr_out:
3084	return IXGBE_SUCCESS;
3085}
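
/*
 * Note (informational): EEPROM words are unpacked least-significant byte
 * first, so a word value of 0x3412 at san_mac_offset produces
 * san_mac_addr[0] = 0x12 and san_mac_addr[1] = 0x34.  Three words cover the
 * full six-byte SAN MAC address for the selected port.
 */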
3086
3087/**
3088 *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3089 *  @hw: pointer to hardware structure
3090 *  @san_mac_addr: SAN MAC address
3091 *
3092 *  Write a SAN MAC address to the EEPROM.
3093 **/
3094s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3095{
3096	s32 status = IXGBE_SUCCESS;
3097	u16 san_mac_data, san_mac_offset;
3098	u8 i;
3099
3100	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3101
3102	/* Look for SAN mac address pointer.  If not defined, return */
3103	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3104
3105	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3106		status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3107		goto san_mac_addr_out;
3108	}
3109
3110	/* Make sure we know which port we need to write */
3111	hw->mac.ops.set_lan_id(hw);
3112	/* Apply the port offset to the address offset */
3113	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3114			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3115
3116	for (i = 0; i < 3; i++) {
3117		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3118		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3119		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3120		san_mac_offset++;
3121	}
3122
3123san_mac_addr_out:
3124	return status;
3125}
3126
3127/**
3128 *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3129 *  @hw: pointer to hardware structure
3130 *
3131 *  Read PCIe configuration space, and get the MSI-X vector count from
3132 *  the capabilities table.
3133 **/
3134u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3135{
3136	u32 msix_count = 64;
3137
3138	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3139	if (hw->mac.msix_vectors_from_pcie) {
3140		msix_count = IXGBE_READ_PCIE_WORD(hw,
3141						  IXGBE_PCIE_MSIX_82599_CAPS);
3142		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3143
3144		/* MSI-X count is zero-based in HW, so increment to give
3145		 * proper value */
3146		msix_count++;
3147	}
3148
3149	return msix_count;
3150}
3151
3152/**
3153 *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3154 *  @hw: pointer to hardware structure
3155 *  @addr: Address to put into receive address register
3156 *  @vmdq: VMDq pool to assign
3157 *
3158 *  Puts an ethernet address into a receive address register, or
3159 *  finds the rar that it is already in; adds it to the pool list
3160 **/
3161s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3162{
3163	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3164	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3165	u32 rar;
3166	u32 rar_low, rar_high;
3167	u32 addr_low, addr_high;
3168
3169	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3170
3171	/* swap bytes for HW little endian */
3172	addr_low  = addr[0] | (addr[1] << 8)
3173			    | (addr[2] << 16)
3174			    | (addr[3] << 24);
3175	addr_high = addr[4] | (addr[5] << 8);
3176
3177	/*
3178	 * Either find the mac_id in rar or find the first empty space.
3179	 * rar_highwater points to just after the highest currently used
3180	 * rar in order to shorten the search.  It grows when we add a new
3181	 * rar to the top.
3182	 */
3183	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3184		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3185
3186		if (((IXGBE_RAH_AV & rar_high) == 0)
3187		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3188			first_empty_rar = rar;
3189		} else if ((rar_high & 0xFFFF) == addr_high) {
3190			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3191			if (rar_low == addr_low)
3192				break;    /* found it already in the rars */
3193		}
3194	}
3195
3196	if (rar < hw->mac.rar_highwater) {
3197		/* already there so just add to the pool bits */
3198		ixgbe_set_vmdq(hw, rar, vmdq);
3199	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3200		/* stick it into first empty RAR slot we found */
3201		rar = first_empty_rar;
3202		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3203	} else if (rar == hw->mac.rar_highwater) {
3204		/* add it to the top of the list and inc the highwater mark */
3205		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3206		hw->mac.rar_highwater++;
3207	} else if (rar >= hw->mac.num_rar_entries) {
3208		return IXGBE_ERR_INVALID_MAC_ADDR;
3209	}
3210
3211	/*
3212	 * If we found rar[0], make sure the default pool bit (we use pool 0)
3213	 * remains cleared to be sure default pool packets will get delivered
3214	 */
3215	if (rar == 0)
3216		ixgbe_clear_vmdq(hw, rar, 0);
3217
3218	return rar;
3219}
3220
3221/**
3222 *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3223 *  @hw: pointer to hardware struct
3224 *  @rar: receive address register index to disassociate
3225 *  @vmdq: VMDq pool index to remove from the rar
3226 **/
3227s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3228{
3229	u32 mpsar_lo, mpsar_hi;
3230	u32 rar_entries = hw->mac.num_rar_entries;
3231
3232	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3233
3234	/* Make sure we are using a valid rar index range */
3235	if (rar >= rar_entries) {
3236		DEBUGOUT1("RAR index %d is out of range.\n", rar);
3237		return IXGBE_ERR_INVALID_ARGUMENT;
3238	}
3239
3240	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3241	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3242
3243	if (!mpsar_lo && !mpsar_hi)
3244		goto done;
3245
3246	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3247		if (mpsar_lo) {
3248			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3249			mpsar_lo = 0;
3250		}
3251		if (mpsar_hi) {
3252			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3253			mpsar_hi = 0;
3254		}
3255	} else if (vmdq < 32) {
3256		mpsar_lo &= ~(1 << vmdq);
3257		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3258	} else {
3259		mpsar_hi &= ~(1 << (vmdq - 32));
3260		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3261	}
3262
3263	/* was that the last pool using this rar? */
3264	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3265		hw->mac.ops.clear_rar(hw, rar);
3266done:
3267	return IXGBE_SUCCESS;
3268}
3269
3270/**
3271 *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3272 *  @hw: pointer to hardware struct
3273 *  @rar: receive address register index to associate with a VMDq index
3274 *  @vmdq: VMDq pool index
3275 **/
3276s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3277{
3278	u32 mpsar;
3279	u32 rar_entries = hw->mac.num_rar_entries;
3280
3281	DEBUGFUNC("ixgbe_set_vmdq_generic");
3282
3283	/* Make sure we are using a valid rar index range */
3284	if (rar >= rar_entries) {
3285		DEBUGOUT1("RAR index %d is out of range.\n", rar);
3286		return IXGBE_ERR_INVALID_ARGUMENT;
3287	}
3288
3289	if (vmdq < 32) {
3290		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3291		mpsar |= 1 << vmdq;
3292		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3293	} else {
3294		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3295		mpsar |= 1 << (vmdq - 32);
3296		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3297	}
3298	return IXGBE_SUCCESS;
3299}
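
/*
 * Worked example (illustrative only): MPSAR_LO/MPSAR_HI together form a
 * 64-bit pool bitmap per RAR.  Associating pool 37 with a RAR sets bit
 * (37 - 32) = 5 in MPSAR_HI(rar); pool 3 would set bit 3 in MPSAR_LO(rar).
 */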
3300
3301/**
3302 *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3303 *  @hw: pointer to hardware structure
3304 **/
3305s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3306{
3307	int i;
3308
3309	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3310	DEBUGOUT(" Clearing UTA\n");
3311
3312	for (i = 0; i < 128; i++)
3313		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3314
3315	return IXGBE_SUCCESS;
3316}
3317
3318/**
3319 *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3320 *  @hw: pointer to hardware structure
3321 *  @vlan: VLAN id to write to VLAN filter
3322 *
3323 *  Returns the VLVF index where this VLAN id should be placed, or
3324 *  IXGBE_ERR_NO_SPACE if no free VLVF entry is available.
3325 **/
3326s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3327{
3328	u32 bits = 0;
3329	u32 first_empty_slot = 0;
3330	s32 regindex;
3331
3332	/* short cut the special case */
3333	if (vlan == 0)
3334		return 0;
3335
3336	/*
3337	 * Search for the vlan id in the VLVF entries. Save off the first empty
3338	 * slot found along the way.
3339	 */
3340	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3341		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3342		if (!bits && !(first_empty_slot))
3343			first_empty_slot = regindex;
3344		else if ((bits & 0x0FFF) == vlan)
3345			break;
3346	}
3347
3348	/*
3349	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3350	 * in the VLVF. Else use the first empty VLVF register for this
3351	 * vlan id.
3352	 */
3353	if (regindex >= IXGBE_VLVF_ENTRIES) {
3354		if (first_empty_slot)
3355			regindex = first_empty_slot;
3356		else {
3357			DEBUGOUT("No space in VLVF.\n");
3358			regindex = IXGBE_ERR_NO_SPACE;
3359		}
3360	}
3361
3362	return regindex;
3363}
3364
3365/**
3366 *  ixgbe_set_vfta_generic - Set VLAN filter table
3367 *  @hw: pointer to hardware structure
3368 *  @vlan: VLAN id to write to VLAN filter
3369 *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3370 *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
3371 *
3372 *  Turn on/off specified VLAN in the VLAN filter table.
3373 **/
3374s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3375			   bool vlan_on)
3376{
3377	s32 regindex;
3378	u32 bitindex;
3379	u32 vfta;
3380	u32 targetbit;
3381	s32 ret_val = IXGBE_SUCCESS;
3382	bool vfta_changed = FALSE;
3383
3384	DEBUGFUNC("ixgbe_set_vfta_generic");
3385
3386	if (vlan > 4095)
3387		return IXGBE_ERR_PARAM;
3388
3389	/*
3390	 * this is a 2 part operation - first the VFTA, then the
3391	 * VLVF and VLVFB if VT Mode is set
3392	 * We don't write the VFTA until we know the VLVF part succeeded.
3393	 */
3394
3395	/* Part 1
3396	 * The VFTA is a bitstring made up of 128 32-bit registers
3397	 * that enable the particular VLAN id, much like the MTA:
3398	 *    bits[11-5]: which register
3399	 *    bits[4-0]:  which bit in the register
3400	 */
3401	regindex = (vlan >> 5) & 0x7F;
3402	bitindex = vlan & 0x1F;
3403	targetbit = (1 << bitindex);
3404	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3405
3406	if (vlan_on) {
3407		if (!(vfta & targetbit)) {
3408			vfta |= targetbit;
3409			vfta_changed = TRUE;
3410		}
3411	} else {
3412		if ((vfta & targetbit)) {
3413			vfta &= ~targetbit;
3414			vfta_changed = TRUE;
3415		}
3416	}
3417
3418	/* Part 2
3419	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3420	 */
3421	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3422					 &vfta_changed);
3423	if (ret_val != IXGBE_SUCCESS)
3424		return ret_val;
3425
3426	if (vfta_changed)
3427		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3428
3429	return IXGBE_SUCCESS;
3430}
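
/*
 * Worked example (illustrative only): for VLAN id 100, regindex =
 * (100 >> 5) & 0x7F = 3 and bitindex = 100 & 0x1F = 4, so enabling the VLAN
 * sets bit 4 of VFTA[3] once the VLVF update succeeds.
 */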
3431
3432/**
3433 *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3434 *  @hw: pointer to hardware structure
3435 *  @vlan: VLAN id to write to VLAN filter
3436 *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3437 *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
3438 *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
3439 *                 should be changed
3440 *
3441 *  Turn on/off specified bit in VLVF table.
3442 **/
3443s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3444			    bool vlan_on, bool *vfta_changed)
3445{
3446	u32 vt;
3447
3448	DEBUGFUNC("ixgbe_set_vlvf_generic");
3449
3450	if (vlan > 4095)
3451		return IXGBE_ERR_PARAM;
3452
3453	/* If VT Mode is set
3454	 *   Either vlan_on
3455	 *     make sure the vlan is in VLVF
3456	 *     set the vind bit in the matching VLVFB
3457	 *   Or !vlan_on
3458	 *     clear the pool bit and possibly the vind
3459	 */
3460	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3461	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3462		s32 vlvf_index;
3463		u32 bits;
3464
3465		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3466		if (vlvf_index < 0)
3467			return vlvf_index;
3468
3469		if (vlan_on) {
3470			/* set the pool bit */
3471			if (vind < 32) {
3472				bits = IXGBE_READ_REG(hw,
3473						IXGBE_VLVFB(vlvf_index * 2));
3474				bits |= (1 << vind);
3475				IXGBE_WRITE_REG(hw,
3476						IXGBE_VLVFB(vlvf_index * 2),
3477						bits);
3478			} else {
3479				bits = IXGBE_READ_REG(hw,
3480					IXGBE_VLVFB((vlvf_index * 2) + 1));
3481				bits |= (1 << (vind - 32));
3482				IXGBE_WRITE_REG(hw,
3483					IXGBE_VLVFB((vlvf_index * 2) + 1),
3484					bits);
3485			}
3486		} else {
3487			/* clear the pool bit */
3488			if (vind < 32) {
3489				bits = IXGBE_READ_REG(hw,
3490						IXGBE_VLVFB(vlvf_index * 2));
3491				bits &= ~(1 << vind);
3492				IXGBE_WRITE_REG(hw,
3493						IXGBE_VLVFB(vlvf_index * 2),
3494						bits);
3495				bits |= IXGBE_READ_REG(hw,
3496					IXGBE_VLVFB((vlvf_index * 2) + 1));
3497			} else {
3498				bits = IXGBE_READ_REG(hw,
3499					IXGBE_VLVFB((vlvf_index * 2) + 1));
3500				bits &= ~(1 << (vind - 32));
3501				IXGBE_WRITE_REG(hw,
3502					IXGBE_VLVFB((vlvf_index * 2) + 1),
3503					bits);
3504				bits |= IXGBE_READ_REG(hw,
3505						IXGBE_VLVFB(vlvf_index * 2));
3506			}
3507		}
3508
3509		/*
3510		 * If there are still bits set in the VLVFB registers
3511		 * for the VLAN ID indicated we need to see if the
3512		 * caller is requesting that we clear the VFTA entry bit.
3513		 * If the caller has requested that we clear the VFTA
3514		 * entry bit but there are still pools/VFs using this VLAN
3515		 * ID entry then ignore the request.  We're not worried
3516		 * about the case where we're turning the VFTA VLAN ID
3517		 * entry bit on, only when requested to turn it off as
3518		 * there may be multiple pools and/or VFs using the
3519		 * VLAN ID entry.  In that case we cannot clear the
3520		 * VFTA bit until all pools/VFs using that VLAN ID have also
3521		 * been cleared.  This will be indicated by "bits" being
3522		 * zero.
3523		 */
3524		if (bits) {
3525			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3526					(IXGBE_VLVF_VIEN | vlan));
3527			if ((!vlan_on) && (vfta_changed != NULL)) {
3528				/* someone wants to clear the vfta entry
3529				 * but some pools/VFs are still using it.
3530				 * Ignore it. */
3531				*vfta_changed = FALSE;
3532			}
3533		} else
3534			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3535	}
3536
3537	return IXGBE_SUCCESS;
3538}
3539
3540/**
3541 *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3542 *  @hw: pointer to hardware structure
3543 *
3544 *  Clears the VLAN filter table, and the VMDq index associated with the filter
3545 **/
3546s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3547{
3548	u32 offset;
3549
3550	DEBUGFUNC("ixgbe_clear_vfta_generic");
3551
3552	for (offset = 0; offset < hw->mac.vft_size; offset++)
3553		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3554
3555	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3556		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3557		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3558		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3559	}
3560
3561	return IXGBE_SUCCESS;
3562}
3563
3564/**
3565 *  ixgbe_check_mac_link_generic - Determine link and speed status
3566 *  @hw: pointer to hardware structure
3567 *  @speed: pointer to link speed
3568 *  @link_up: TRUE when link is up
3569 *  @link_up_wait_to_complete: bool used to wait for link up or not
3570 *
3571 *  Reads the links register to determine if link is up and the current speed
3572 **/
3573s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3574				 bool *link_up, bool link_up_wait_to_complete)
3575{
3576	u32 links_reg, links_orig;
3577	u32 i;
3578
3579	DEBUGFUNC("ixgbe_check_mac_link_generic");
3580
3581	/* clear the old state */
3582	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3583
3584	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3585
3586	if (links_orig != links_reg) {
3587		DEBUGOUT2("LINKS changed from %08X to %08X\n",
3588			  links_orig, links_reg);
3589	}
3590
3591	if (link_up_wait_to_complete) {
3592		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3593			if (links_reg & IXGBE_LINKS_UP) {
3594				*link_up = TRUE;
3595				break;
3596			} else {
3597				*link_up = FALSE;
3598			}
3599			msec_delay(100);
3600			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3601		}
3602	} else {
3603		if (links_reg & IXGBE_LINKS_UP)
3604			*link_up = TRUE;
3605		else
3606			*link_up = FALSE;
3607	}
3608
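	/* Decode the current link speed from the LINKS register speed field */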
3609	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3610	    IXGBE_LINKS_SPEED_10G_82599)
3611		*speed = IXGBE_LINK_SPEED_10GB_FULL;
3612	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3613		 IXGBE_LINKS_SPEED_1G_82599)
3614		*speed = IXGBE_LINK_SPEED_1GB_FULL;
3615	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3616		 IXGBE_LINKS_SPEED_100_82599)
3617		*speed = IXGBE_LINK_SPEED_100_FULL;
3618	else
3619		*speed = IXGBE_LINK_SPEED_UNKNOWN;
3620
3621	return IXGBE_SUCCESS;
3622}
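
/*
 * Illustrative use (not part of the driver): a caller that does not want to
 * block waiting for link might poll like this, assuming an initialized 'hw':
 *
 *	ixgbe_link_speed speed;
 *	bool link_up;
 *
 *	ixgbe_check_mac_link_generic(hw, &speed, &link_up, FALSE);
 *	if (link_up && speed == IXGBE_LINK_SPEED_10GB_FULL)
 *		printf("link up at 10Gb/s\n");
 */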
3623
3624/**
3625 *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3626 *  the EEPROM
3627 *  @hw: pointer to hardware structure
3628 *  @wwnn_prefix: the alternative WWNN prefix
3629 *  @wwpn_prefix: the alternative WWPN prefix
3630 *
3631 *  This function will read the alternative SAN MAC address block in the
3632 *  EEPROM to check for alternative WWNN/WWPN prefix support.
3633 **/
3634s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3635				 u16 *wwpn_prefix)
3636{
3637	u16 offset, caps;
3638	u16 alt_san_mac_blk_offset;
3639
3640	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3641
3642	/* clear output first */
3643	*wwnn_prefix = 0xFFFF;
3644	*wwpn_prefix = 0xFFFF;
3645
3646	/* check if alternative SAN MAC is supported */
3647	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3648			    &alt_san_mac_blk_offset);
3649
3650	if ((alt_san_mac_blk_offset == 0) ||
3651	    (alt_san_mac_blk_offset == 0xFFFF))
3652		goto wwn_prefix_out;
3653
3654	/* check capability in alternative san mac address block */
3655	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3656	hw->eeprom.ops.read(hw, offset, &caps);
3657	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3658		goto wwn_prefix_out;
3659
3660	/* get the corresponding prefix for WWNN/WWPN */
3661	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3662	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3663
3664	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3665	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3666
3667wwn_prefix_out:
3668	return IXGBE_SUCCESS;
3669}
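
/*
 * Illustrative use (not part of the driver): both prefixes read back as
 * 0xFFFF when no usable alternative SAN MAC address block is present:
 *
 *	u16 wwnn_prefix, wwpn_prefix;
 *
 *	ixgbe_get_wwn_prefix_generic(hw, &wwnn_prefix, &wwpn_prefix);
 *	if (wwnn_prefix != 0xFFFF && wwpn_prefix != 0xFFFF)
 *		printf("alternative WWNN/WWPN prefixes available\n");
 */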
3670
3671/**
3672 *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3673 *  @hw: pointer to hardware structure
3674 *  @bs: the fcoe boot status
3675 *
3676 *  This function will read the FCOE boot status from the iSCSI FCOE block
3677 **/
3678s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3679{
3680	u16 offset, caps, flags;
3681	s32 status;
3682
3683	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3684
3685	/* clear output first */
3686	*bs = ixgbe_fcoe_bootstatus_unavailable;
3687
3688	/* check if FCOE IBA block is present */
3689	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3690	status = hw->eeprom.ops.read(hw, offset, &caps);
3691	if (status != IXGBE_SUCCESS)
3692		goto out;
3693
3694	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3695		goto out;
3696
3697	/* check if iSCSI FCOE block is populated */
3698	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3699	if (status != IXGBE_SUCCESS)
3700		goto out;
3701
3702	if ((offset == 0) || (offset == 0xFFFF))
3703		goto out;
3704
3705	/* read fcoe flags in iSCSI FCOE block */
3706	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
3707	status = hw->eeprom.ops.read(hw, offset, &flags);
3708	if (status != IXGBE_SUCCESS)
3709		goto out;
3710
3711	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
3712		*bs = ixgbe_fcoe_bootstatus_enabled;
3713	else
3714		*bs = ixgbe_fcoe_bootstatus_disabled;
3715
3716out:
3717	return status;
3718}
3719
3720/**
3721 *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
3722 *  control
3723 *  @hw: pointer to hardware structure
3724 *
3725 *  There are several phys that do not support autoneg flow control. This
3726 *  function checks the device id to see if the associated phy supports
3727 *  autoneg flow control.
3728 **/
3729static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
3730{
3731
3732	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
3733
3734	switch (hw->device_id) {
3735	case IXGBE_DEV_ID_X540T:
3736		return IXGBE_SUCCESS;
3737	case IXGBE_DEV_ID_82599_T3_LOM:
3738		return IXGBE_SUCCESS;
3739	default:
3740		return IXGBE_ERR_FC_NOT_SUPPORTED;
3741	}
3742}
3743
3744/**
3745 *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3746 *  @hw: pointer to hardware structure
3747 *  @enable: enable or disable switch for anti-spoofing
3748 *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
3749 *
3750 **/
3751void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
3752{
3753	int j;
3754	int pf_target_reg = pf >> 3;
3755	int pf_target_shift = pf % 8;
3756	u32 pfvfspoof = 0;
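	/*
	 * Each PFVFSPOOF register carries the MAC anti-spoof enables for 8
	 * pools in its low byte: pf >> 3 selects the register and pf % 8
	 * the bit that is cleared again for the PF further below.
	 */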
3757
3758	if (hw->mac.type == ixgbe_mac_82598EB)
3759		return;
3760
3761	if (enable)
3762		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
3763
3764	/*
3765	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
3766	 * MAC anti-spoof enables in each register array element.
3767	 */
3768	for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
3769		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
3770
3771	/* If not enabling anti-spoofing then done */
3772	if (!enable)
3773		return;
3774
3775	/*
3776	 * The PF should be allowed to spoof so that it can support
3777	 * emulation mode NICs.  Reset the bit assigned to the PF
3778	 */
3779	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
3780	pfvfspoof ^= (1 << pf_target_shift);
3781	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
3782}
3783
3784/**
3785 *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3786 *  @hw: pointer to hardware structure
3787 *  @enable: enable or disable switch for VLAN anti-spoofing
3788 *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
3789 *
3790 **/
3791void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3792{
3793	int vf_target_reg = vf >> 3;
3794	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3795	u32 pfvfspoof;
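	/*
	 * VLAN anti-spoof enables occupy the upper byte of each PFVFSPOOF
	 * register, one bit per pool: vf >> 3 selects the register and
	 * vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT selects the bit.
	 */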
3796
3797	if (hw->mac.type == ixgbe_mac_82598EB)
3798		return;
3799
3800	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3801	if (enable)
3802		pfvfspoof |= (1 << vf_target_shift);
3803	else
3804		pfvfspoof &= ~(1 << vf_target_shift);
3805	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3806}
3807
3808/**
3809 *  ixgbe_get_device_caps_generic - Get additional device capabilities
3810 *  @hw: pointer to hardware structure
3811 *  @device_caps: the EEPROM word with the extra device capabilities
3812 *
3813 *  This function will read the EEPROM location for the device capabilities,
3814 *  and return the word through device_caps.
3815 **/
3816s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
3817{
3818	DEBUGFUNC("ixgbe_get_device_caps_generic");
3819
3820	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3821
3822	return IXGBE_SUCCESS;
3823}
3824
3825/**
3826 *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3827 *  @hw: pointer to hardware structure
3828 *
3829 **/
3830void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3831{
3832	u32 regval;
3833	u32 i;
3834
3835	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3836
3837	/* Enable relaxed ordering */
3838	for (i = 0; i < hw->mac.max_tx_queues; i++) {
3839		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3840		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
3841		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3842	}
3843
3844	for (i = 0; i < hw->mac.max_rx_queues; i++) {
3845		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3846		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
3847			   IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
3848		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3849	}
3850
3851}
3852
3853/**
3854 *  ixgbe_calculate_checksum - Calculate checksum for buffer
3855 *  @buffer: pointer to the buffer over which to calculate the checksum
3856 *  @length: number of bytes to include in the checksum
3857 *  Calculates the checksum of the buffer over the specified length.  The
3858 *  calculated checksum is returned.
3859 **/
3860static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3861{
3862	u32 i;
3863	u8 sum = 0;
3864
3865	DEBUGFUNC("ixgbe_calculate_checksum");
3866
3867	if (!buffer)
3868		return 0;
3869
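	/*
	 * Sum every byte and return the two's complement of that sum, so
	 * that adding the checksum byte itself brings the total to zero
	 * (mod 256).
	 */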
3870	for (i = 0; i < length; i++)
3871		sum += buffer[i];
3872
3873	return (u8) (0 - sum);
3874}
3875
3876/**
3877 *  ixgbe_host_interface_command - Issue command to manageability block
3878 *  @hw: pointer to the HW structure
3879 *  @buffer: contains the command to write and where the return status will
3880 *   be placed
3881 *  @length: length of buffer, must be a multiple of 4 bytes
3882 *
3883 *  Communicates with the manageability block.  On success returns
3884 *  IXGBE_SUCCESS, else returns IXGBE_ERR_HOST_INTERFACE_COMMAND.
3885 **/
3886static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3887					u32 length)
3888{
3889	u32 hicr, i, bi;
3890	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3891	u8 buf_len, dword_len;
3892
3893	s32 ret_val = IXGBE_SUCCESS;
3894
3895	DEBUGFUNC("ixgbe_host_interface_command");
3896
3897	if (length == 0 || length & 0x3 ||
3898	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3899		DEBUGOUT("Buffer length failure.\n");
3900		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3901		goto out;
3902	}
3903
3904	/* Check that the host interface is enabled. */
3905	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3906	if ((hicr & IXGBE_HICR_EN) == 0) {
3907		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
3908		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3909		goto out;
3910	}
3911
3912	/* Calculate length in DWORDs */
3913	dword_len = length >> 2;
3914
3915	/*
3916	 * The device driver writes the relevant command block
3917	 * into the ram area.
3918	 */
3919	for (i = 0; i < dword_len; i++)
3920		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3921				      i, IXGBE_CPU_TO_LE32(buffer[i]));
3922
3923	/* Setting this bit tells the ARC that a new command is pending. */
3924	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
3925
3926	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
3927		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3928		if (!(hicr & IXGBE_HICR_C))
3929			break;
3930		msec_delay(1);
3931	}
3932
3933	/* Check command successful completion. */
3934	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
3935	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
3936		DEBUGOUT("Command has failed with no status valid.\n");
3937		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3938		goto out;
3939	}
3940
3941	/* Calculate length in DWORDs */
3942	dword_len = hdr_size >> 2;
3943
3944	/* first pull in the header so we know the buffer length */
3945	for (bi = 0; bi < dword_len; bi++) {
3946		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3947		IXGBE_LE32_TO_CPUS(&buffer[bi]);
3948	}
3949
3950	/* If there is anything in the data position, pull it in */
3951	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
3952	if (buf_len == 0)
3953		goto out;
3954
3955	if (length < (buf_len + hdr_size)) {
3956		DEBUGOUT("Buffer not large enough for reply message.\n");
3957		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3958		goto out;
3959	}
3960
3961	/* Calculate length in DWORDs, rounding up to the nearest whole DWORD */
3962	dword_len = (buf_len + 3) >> 2;
3963
3964	/* Pull in the rest of the buffer (bi is where we left off) */
3965	for (; bi <= dword_len; bi++) {
3966		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3967		IXGBE_LE32_TO_CPUS(&buffer[bi]);
3968	}
3969
3970out:
3971	return ret_val;
3972}
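
/*
 * Note: the buffer handed to ixgbe_host_interface_command() must begin with
 * a struct ixgbe_hic_hdr; the header is echoed back by the firmware and its
 * buf_len field reports how much reply payload follows.  See
 * ixgbe_set_fw_drv_ver_generic() below for a typical caller.
 */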
3973
3974/**
3975 *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
3976 *  @hw: pointer to the HW structure
3977 *  @maj: driver version major number
3978 *  @min: driver version minor number
3979 *  @build: driver version build number
3980 *  @sub: driver version sub build number
3981 *
3982 *  Sends driver version number to firmware through the manageability
3983 *  block.  On success return IXGBE_SUCCESS
3984 *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
3985 *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
3986 **/
3987s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3988				 u8 build, u8 sub)
3989{
3990	struct ixgbe_hic_drv_info fw_cmd;
3991	int i;
3992	s32 ret_val = IXGBE_SUCCESS;
3993
3994	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
3995
3996	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
3997	    != IXGBE_SUCCESS) {
3998		ret_val = IXGBE_ERR_SWFW_SYNC;
3999		goto out;
4000	}
4001
4002	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4003	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4004	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4005	fw_cmd.port_num = (u8)hw->bus.func;
4006	fw_cmd.ver_maj = maj;
4007	fw_cmd.ver_min = min;
4008	fw_cmd.ver_build = build;
4009	fw_cmd.ver_sub = sub;
4010	fw_cmd.hdr.checksum = 0;
4011	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4012				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4013	fw_cmd.pad = 0;
4014	fw_cmd.pad2 = 0;
4015
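	/*
	 * Retry on host interface errors; once the firmware returns a
	 * response, its status (success or failure) is taken as final.
	 */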
4016	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4017		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4018						       sizeof(fw_cmd));
4019		if (ret_val != IXGBE_SUCCESS)
4020			continue;
4021
4022		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4023		    FW_CEM_RESP_STATUS_SUCCESS)
4024			ret_val = IXGBE_SUCCESS;
4025		else
4026			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4027
4028		break;
4029	}
4030
4031	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4032out:
4033	return ret_val;
4034}
4035
4036/**
4037 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4038 * @hw: pointer to hardware structure
4039 * @num_pb: number of packet buffers to allocate
4040 * @headroom: reserve n KB of headroom
4041 * @strategy: packet buffer allocation strategy
4042 **/
4043void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4044			     int strategy)
4045{
4046	u32 pbsize = hw->mac.rx_pb_size;
4047	int i = 0;
4048	u32 rxpktsize, txpktsize, txpbthresh;
4049
4050	/* Reserve headroom */
4051	pbsize -= headroom;
4052
4053	if (!num_pb)
4054		num_pb = 1;
4055
4056	/* Divide the remaining packet buffer space amongst the number of
4057	 * packet buffers requested, using the supplied strategy.
4058	 */
4059	switch (strategy) {
4060	case (PBA_STRATEGY_WEIGHTED):
4061		/* The ixgbe_dcb_pba_80_48 strategy weights the first half of the
4062		 * packet buffers with 5/8 of the packet buffer space.
4063		 */
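		/*
		 * Worked example (illustrative only): with num_pb = 8 and a
		 * 512 KB packet buffer, each of the first num_pb / 2 = 4
		 * buffers gets (512 * 5 * 2) / (8 * 8) = 80 KB, i.e. 5/8 of
		 * the space overall, and the remaining 192 KB is split
		 * equally among the last four buffers below.
		 */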
4064		rxpktsize = (pbsize * 5 * 2) / (num_pb * 8);
4065		pbsize -= rxpktsize * (num_pb / 2);
4066		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4067		for (; i < (num_pb / 2); i++)
4068			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4069		/* Fall through to configure remaining packet buffers */
4070	case (PBA_STRATEGY_EQUAL):
4071		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4072		for (; i < num_pb; i++)
4073			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4074		break;
4075	default:
4076		break;
4077	}
4078
4079	/* Only support an equally distributed Tx packet buffer strategy. */
4080	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4081	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4082	for (i = 0; i < num_pb; i++) {
4083		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4084		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4085	}
4086
4087	/* Clear unused TCs, if any, to zero buffer size */
4088	for (; i < IXGBE_MAX_PB; i++) {
4089		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4090		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4091		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4092	}
4093}
4094
4095/**
4096 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4097 * @hw: pointer to the hardware structure
4098 *
4099 * The 82599 and x540 MACs can experience issues if TX work is still pending
4100 * when a reset occurs.  This function prevents this by flushing the PCIe
4101 * buffers on the system.
4102 **/
4103void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4104{
4105	u32 gcr_ext, hlreg0;
4106
4107	/*
4108	 * If double reset is not requested then all transactions should
4109	 * already be clear and as such there is no work to do
4110	 */
4111	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4112		return;
4113
4114	/*
4115	 * Set loopback enable to prevent any transmits from being sent
4116	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
4117	 * has already been cleared.
4118	 */
4119	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4120	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4121
4122	/* initiate cleaning flow for buffers in the PCIe transaction layer */
4123	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4124	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4125			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4126
4127	/* Flush all writes and allow 20usec for all transactions to clear */
4128	IXGBE_WRITE_FLUSH(hw);
4129	usec_delay(20);
4130
4131	/* restore previous register values */
4132	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4133	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4134}
4135
4136