1 /*
2  * CDDL HEADER START
3  *
4  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at:
10  *      http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When using or redistributing this file, you may do so under the
15  * License only. No other modification of this header is permitted.
16  *
17  * If applicable, add the following below this CDDL HEADER, with the
18  * fields enclosed by brackets "[]" replaced with your own identifying
19  * information: Portions Copyright [yyyy] [name of copyright owner]
20  *
21  * CDDL HEADER END
22  */
23 
24 /*
25  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 /* IntelVersion: 1.162 sol_ixgbe_shared_339b */
30 
31 #include "ixgbe_type.h"
32 #include "ixgbe_api.h"
33 #include "ixgbe_common.h"
34 #include "ixgbe_phy.h"
35 
36 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
37 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
38 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
39     ixgbe_link_speed *speed, bool *autoneg);
40 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
41 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
42 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
43 					bool autoneg_wait_to_complete);
44 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
45     ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete);
46 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
47     ixgbe_link_speed speed, bool autoneg,
48     bool autoneg_wait_to_complete);
49 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
50     ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete);
51 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
52 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
53 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
54 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
55 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
56 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
57     u32 vind, bool vlan_on);
58 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
59 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
60 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
61 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
62     u8 *eeprom_data);
63 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
64 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
65 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
66 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
67 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw);
68 
69 /*
70  * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
71  * @hw: pointer to the HW structure
72  *
73  * The defaults for 82598 should be in the range of 50us to 50ms;
74  * however, the hardware default for these parts is 500us to 1ms, which
75  * is less than the 10ms recommended by the PCIe spec.  To address this
76  * we increase the value to a range of 10ms to 250ms for capability
77  * version 1 configurations, or 16ms to 55ms for version 2.
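 *
 * In all cases, completion timeout resend is disabled before returning.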
78  */
79 void
80 ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
81 {
82 	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
83 	u16 pcie_devctl2;
84 
85 	/* only take action if timeout value is defaulted to 0 */
86 	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
87 		goto out;
88 
89 	/*
90 	 * if the capabilities version is type 1 we can write a
91 	 * timeout in the 10ms to 250ms range through the GCR register
92 	 */
93 	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
94 		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
95 		goto out;
96 	}
97 
98 	/*
99 	 * for version 2 capabilities we need to write the config space
100 	 * directly in order to set a completion timeout value in the
101 	 * 16ms to 55ms range
102 	 */
103 	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
104 	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
105 	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
106 out:
107 	/* disable completion timeout resend */
108 	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
109 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
110 }
111 
112 /*
113  * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
114  * @hw: pointer to hardware structure
115  *
116  * Read PCIe configuration space, and get the MSI-X vector count from
117  * the capabilities table.
118  */
119 u32
120 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
121 {
122 	u32 msix_count = 18;
123 
124 	DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
125 
126 	if (hw->mac.msix_vectors_from_pcie) {
127 		msix_count = IXGBE_READ_PCIE_WORD(hw,
128 		    IXGBE_PCIE_MSIX_82598_CAPS);
129 		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
130 
131 		/*
132 		 * MSI-X count is zero-based in HW, so increment to give
133 		 * proper value
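		 * (for example, a raw table size field of 17 means 18 vectors)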
134 		 */
135 		msix_count++;
136 	}
137 	return (msix_count);
138 }
139 
140 /*
141  * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
142  * @hw: pointer to hardware structure
143  *
144  * Initialize the function pointers and assign the MAC type for 82598.
145  * Does not touch the hardware.
146  */
147 s32
148 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
149 {
150 	struct ixgbe_mac_info *mac = &hw->mac;
151 	struct ixgbe_phy_info *phy = &hw->phy;
152 	s32 ret_val;
153 
154 	DEBUGFUNC("ixgbe_init_ops_82598");
155 
156 	ret_val = ixgbe_init_phy_ops_generic(hw);
157 	ret_val = ixgbe_init_ops_generic(hw);
158 
159 	/* PHY */
160 	phy->ops.init = &ixgbe_init_phy_ops_82598;
161 
162 	/* MAC */
163 	mac->ops.start_hw = &ixgbe_start_hw_82598;
164 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
165 	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
166 	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
167 	mac->ops.get_supported_physical_layer =
168 	    &ixgbe_get_supported_physical_layer_82598;
169 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
170 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
171 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
172 
173 	/* RAR, Multicast, VLAN */
174 	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
175 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
176 	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
177 	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
178 
179 	/* Flow Control */
180 	mac->ops.fc_enable = &ixgbe_fc_enable_82598;
181 
182 	mac->mcft_size = 128;
183 	mac->vft_size = 128;
184 	mac->num_rar_entries = 16;
185 	mac->max_tx_queues = 32;
186 	mac->max_rx_queues = 64;
187 	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
188 
189 	/* SFP+ Module */
190 	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
191 
192 	/* Link */
193 	mac->ops.check_link = &ixgbe_check_mac_link_82598;
194 	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
195 	mac->ops.get_link_capabilities =
196 	    &ixgbe_get_link_capabilities_82598;
197 
198 	return (ret_val);
199 }
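
/*
 * Illustrative usage sketch; drivers normally reach this routine
 * indirectly through ixgbe_init_shared_code() in ixgbe_api.c rather than
 * calling it directly, and error handling is omitted here:
 *
 *	hw->mac.type = ixgbe_mac_82598EB;
 *	if (ixgbe_init_ops_82598(hw) == IXGBE_SUCCESS)
 *		(void) hw->mac.ops.reset_hw(hw);
 */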
200 
201 /*
202  * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
203  * @hw: pointer to hardware structure
204  *
205  * Initialize any function pointers that could not be set during
206  * init_shared_code because the PHY/SFP type was
207  * not known.  Perform the SFP init if necessary.
208  *
209  */
210 s32
211 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
212 {
213 	struct ixgbe_mac_info *mac = &hw->mac;
214 	struct ixgbe_phy_info *phy = &hw->phy;
215 	s32 ret_val = IXGBE_SUCCESS;
216 	u16 list_offset, data_offset;
217 
218 	DEBUGFUNC("ixgbe_init_phy_ops_82598");
219 
220 	/* Identify the PHY */
221 	phy->ops.identify(hw);
222 
223 	/* Overwrite the link function pointers if copper PHY */
224 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
225 		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
226 		mac->ops.get_link_capabilities =
227 		    &ixgbe_get_copper_link_capabilities_generic;
228 	}
229 
230 	switch (hw->phy.type) {
231 	case ixgbe_phy_tn:
232 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
233 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
234 		phy->ops.get_firmware_version =
235 		    &ixgbe_get_phy_firmware_version_tnx;
236 		break;
237 	case ixgbe_phy_aq:
238 		phy->ops.get_firmware_version =
239 		    &ixgbe_get_phy_firmware_version_generic;
240 		break;
241 	case ixgbe_phy_nl:
242 		phy->ops.reset = &ixgbe_reset_phy_nl;
243 
244 		/* Call SFP+ identify routine to get the SFP+ module type */
245 		ret_val = phy->ops.identify_sfp(hw);
246 		if (ret_val != IXGBE_SUCCESS)
247 			goto out;
248 		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
249 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
250 			goto out;
251 		}
252 
253 		/* Check to see if SFP+ module is supported */
254 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
255 		    &list_offset, &data_offset);
256 		if (ret_val != IXGBE_SUCCESS) {
257 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
258 			goto out;
259 		}
260 		break;
261 	default:
262 		break;
263 	}
264 out:
265 	return (ret_val);
266 }
267 
268 /*
269  * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
270  * @hw: pointer to hardware structure
271  *
272  * Starts the hardware using the generic start_hw function.
273  * Disables relaxed ordering, then sets the PCIe completion timeout.
274  */
275 s32
276 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
277 {
278 	u32 regval;
279 	u32 i;
280 	s32 ret_val = IXGBE_SUCCESS;
281 
282 	DEBUGFUNC("ixgbe_start_hw_82598");
283 
284 	ret_val = ixgbe_start_hw_generic(hw);
285 
286 	/*
287 	 * Disable relaxed ordering
288 	 */
289 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
290 	    (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
291 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
292 		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
293 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
294 	}
295 
296 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
297 	    (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
298 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
299 		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
300 		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
301 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
302 	}
303 
304 	/* set the completion timeout for interface */
305 	if (ret_val == IXGBE_SUCCESS)
306 		ixgbe_set_pcie_completion_timeout(hw);
307 
308 	return (ret_val);
309 }
310 
311 /*
312  * ixgbe_get_link_capabilities_82598 - Determines link capabilities
313  * @hw: pointer to hardware structure
314  * @speed: pointer to link speed
315  * @autoneg: boolean auto-negotiation value
316  *
317  * Determines the link capabilities by reading the AUTOC register.
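 *
 * For example, a part strapped for KX4/KX autonegotiation with both KX4
 * and KX supported reports 10GB and 1GB full-duplex capability with
 * autoneg set to true.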
318  */
319 static s32
320 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
321     ixgbe_link_speed *speed, bool *autoneg)
322 {
323 	s32 status = IXGBE_SUCCESS;
324 	u32 autoc = 0;
325 
326 	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
327 
328 	/*
329 	 * Determine link capabilities based on the stored value of AUTOC,
330 	 * which represents EEPROM defaults.  If AUTOC value has not been
331 	 * stored, use the current register value.
332 	 */
333 	if (hw->mac.orig_link_settings_stored)
334 		autoc = hw->mac.orig_autoc;
335 	else
336 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
337 
338 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
339 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
340 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
341 		*autoneg = false;
342 		break;
343 
344 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
345 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
346 		*autoneg = false;
347 		break;
348 
349 	case IXGBE_AUTOC_LMS_1G_AN:
350 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
351 		*autoneg = true;
352 		break;
353 
354 	case IXGBE_AUTOC_LMS_KX4_AN:
355 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
356 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
357 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
358 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
359 		if (autoc & IXGBE_AUTOC_KX_SUPP)
360 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
361 		*autoneg = true;
362 		break;
363 
364 	default:
365 		status = IXGBE_ERR_LINK_SETUP;
366 		break;
367 	}
368 
369 	return (status);
370 }
371 
372 /*
373  * ixgbe_get_media_type_82598 - Determines media type
374  * @hw: pointer to hardware structure
375  *
376  * Returns the media type (fiber, copper, backplane)
377  */
378 static enum ixgbe_media_type
379 ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
380 {
381 	enum ixgbe_media_type media_type;
382 
383 	DEBUGFUNC("ixgbe_get_media_type_82598");
384 
385 	/* Detect if there is a copper PHY attached. */
386 	if (hw->phy.type == ixgbe_phy_cu_unknown ||
387 	    hw->phy.type == ixgbe_phy_tn ||
388 	    hw->phy.type == ixgbe_phy_aq) {
389 		media_type = ixgbe_media_type_copper;
390 		goto out;
391 	}
392 
393 	/* Media type for I82598 is based on device ID */
394 	switch (hw->device_id) {
395 	case IXGBE_DEV_ID_82598:
396 	case IXGBE_DEV_ID_82598_BX:
397 		/* Default device ID is mezzanine card KX/KX4 */
398 		media_type = ixgbe_media_type_backplane;
399 		break;
400 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
401 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
402 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
403 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
404 	case IXGBE_DEV_ID_82598EB_XF_LR:
405 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
406 		media_type = ixgbe_media_type_fiber;
407 		break;
408 	case IXGBE_DEV_ID_82598EB_CX4:
409 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
410 		media_type = ixgbe_media_type_cx4;
411 		break;
412 	case IXGBE_DEV_ID_82598AT:
413 	case IXGBE_DEV_ID_82598AT2:
414 		media_type = ixgbe_media_type_copper;
415 		break;
416 	default:
417 		media_type = ixgbe_media_type_unknown;
418 		break;
419 	}
420 out:
421 	return (media_type);
422 }
423 
424 /*
425  * ixgbe_fc_enable_82598 - Enable flow control
426  * @hw: pointer to hardware structure
427  * @packetbuf_num: packet buffer number (0-7)
428  *
429  * Enable flow control according to the current settings.
430  */
431 s32
432 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
433 {
434 	s32 ret_val = IXGBE_SUCCESS;
435 	u32 fctrl_reg;
436 	u32 rmcs_reg;
437 	u32 reg;
438 	u32 link_speed = 0;
439 	bool link_up;
440 
441 	DEBUGFUNC("ixgbe_fc_enable_82598");
442 
443 	/*
444 	 * On 82598 backplane parts, having flow control enabled causes
445 	 * resets while doing KX, so turn it off here.
446 	 */
447 	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
448 	if (link_up &&
449 	    link_speed == IXGBE_LINK_SPEED_1GB_FULL &&
450 	    hw->mac.ops.get_media_type(hw) == ixgbe_media_type_backplane) {
451 		hw->fc.disable_fc_autoneg = true;
452 		hw->fc.requested_mode = ixgbe_fc_none;
453 	}
454 
455 	/* Negotiate the fc mode to use */
456 	ret_val = ixgbe_fc_autoneg(hw);
457 	if (ret_val)
458 		goto out;
459 
460 	/* Disable any previous flow control settings */
461 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
462 	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
463 
464 	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
465 	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
466 
467 	/*
468 	 * The possible values of fc.current_mode are:
469 	 * 0: Flow control is completely disabled
470 	 * 1: Rx flow control is enabled (we can receive pause frames,
471 	 *    but not send pause frames).
472 	 * 2: Tx flow control is enabled (we can send pause frames but
473 	 *    we do not support receiving pause frames).
474 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
475 	 * other: Invalid.
476 	 */
477 	switch (hw->fc.current_mode) {
478 	case ixgbe_fc_none:
479 		/*
480 		 * Flow control is disabled by software override or autoneg.
481 		 * The code below will actually disable it in the HW.
482 		 */
483 		break;
484 	case ixgbe_fc_rx_pause:
485 		/*
486 		 * Rx Flow control is enabled and Tx Flow control is
487 		 * disabled by software override. Since there really
488 		 * isn't a way to advertise that we are capable of RX
489 		 * Pause ONLY, we will advertise that we support both
490 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
491 		 * disable the adapter's ability to send PAUSE frames.
492 		 */
493 		fctrl_reg |= IXGBE_FCTRL_RFCE;
494 		break;
495 	case ixgbe_fc_tx_pause:
496 		/*
497 		 * Tx Flow control is enabled, and Rx Flow control is
498 		 * disabled by software override.
499 		 */
500 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
501 		break;
502 	case ixgbe_fc_full:
503 		/* Flow control (both Rx and Tx) is enabled by SW override. */
504 		fctrl_reg |= IXGBE_FCTRL_RFCE;
505 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
506 		break;
507 	default:
508 		DEBUGOUT("Flow control param set incorrectly\n");
509 		ret_val = IXGBE_ERR_CONFIG;
510 		goto out;
511 	}
512 
513 	/* Set 802.3x based flow control settings. */
514 	fctrl_reg |= IXGBE_FCTRL_DPF;
515 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
516 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
517 
518 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
519 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
520 		if (hw->fc.send_xon) {
521 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
522 			    (hw->fc.low_water | IXGBE_FCRTL_XONE));
523 		} else {
524 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
525 			    hw->fc.low_water);
526 		}
527 
528 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
529 		    (hw->fc.high_water | IXGBE_FCRTH_FCEN));
530 	}
531 
532 	/* Configure pause time (2 TCs per register) */
533 	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
534 	if ((packetbuf_num & 1) == 0)
535 		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
536 	else
537 		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
538 	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
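	/*
	 * For example, packet buffers 2 and 3 share FCTTV(1): buffer 2 uses
	 * the lower 16 bits and buffer 3 the upper 16 bits.
	 */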
539 
540 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
541 
542 out:
543 	return (ret_val);
544 }
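
/*
 * Illustrative call sequence; the field values below are examples only,
 * not recommended settings:
 *
 *	hw->fc.requested_mode = ixgbe_fc_full;
 *	hw->fc.pause_time = 0x680;
 *	hw->fc.send_xon = true;
 *	hw->fc.low_water = 0x2000;
 *	hw->fc.high_water = 0x3000;
 *	(void) ixgbe_fc_enable_82598(hw, 0);
 */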
545 
546 /*
547  * ixgbe_start_mac_link_82598 - Configures MAC link settings
548  * @hw: pointer to hardware structure
549  *
550  * Configures link settings based on values in the ixgbe_hw struct.
551  * Restarts the link.  Performs autonegotiation if needed.
552  */
553 static s32
554 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, bool autoneg_wait_to_complete)
555 {
556 	u32 autoc_reg;
557 	u32 links_reg;
558 	u32 i;
559 	s32 status = IXGBE_SUCCESS;
560 
561 	DEBUGFUNC("ixgbe_start_mac_link_82598");
562 
563 	/* Restart link */
564 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
565 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
566 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
567 
568 	/* Only poll for autoneg to complete if specified to do so */
569 	if (autoneg_wait_to_complete) {
570 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
571 		    IXGBE_AUTOC_LMS_KX4_AN ||
572 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
573 		    IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
574 			links_reg = 0; /* Just in case Autoneg time = 0 */
575 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
576 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
577 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
578 					break;
579 				msec_delay(100);
580 			}
581 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
582 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
583 				DEBUGOUT("Autonegotiation did not complete.\n");
584 			}
585 		}
586 	}
587 
588 	/* Add delay to filter out noise during initial link setup */
589 	msec_delay(50);
590 
591 	return (status);
592 }
593 
594 /*
595  * ixgbe_check_mac_link_82598 - Get link/speed status
596  * @hw: pointer to hardware structure
597  * @speed: pointer to link speed
598  * @link_up: true if link is up, false otherwise
599  * @link_up_wait_to_complete: bool used to wait for link up or not
600  *
601  * Reads the links register to determine if link is up and the current speed
602  */
603 static s32
604 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
605     bool *link_up, bool link_up_wait_to_complete)
606 {
607 	u32 links_reg;
608 	u32 i;
609 	u16 link_reg, adapt_comp_reg;
610 
611 	DEBUGFUNC("ixgbe_check_mac_link_82598");
612 
613 	/*
614 	 * SERDES PHY requires us to read link status from undocumented
615 	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
616 	 * indicates link down.  0xC00C is read to check that the XAUI lanes
617 	 * are active.  Bit 0 clear indicates active; set indicates inactive.
618 	 */
619 	if (hw->phy.type == ixgbe_phy_nl) {
620 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
621 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
622 		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
623 		    &adapt_comp_reg);
624 		if (link_up_wait_to_complete) {
625 			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
626 				if ((link_reg & 1) &&
627 				    ((adapt_comp_reg & 1) == 0)) {
628 					*link_up = true;
629 					break;
630 				} else {
631 					*link_up = false;
632 				}
633 				msec_delay(100);
634 				hw->phy.ops.read_reg(hw, 0xC79F,
635 				    IXGBE_TWINAX_DEV, &link_reg);
636 				hw->phy.ops.read_reg(hw, 0xC00C,
637 				    IXGBE_TWINAX_DEV, &adapt_comp_reg);
638 			}
639 		} else {
640 			if ((link_reg & 1) &&
641 			    ((adapt_comp_reg & 1) == 0))
642 				*link_up = true;
643 			else
644 				*link_up = false;
645 		}
646 
647 		if (*link_up == false)
648 			goto out;
649 	}
650 
651 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
652 	if (link_up_wait_to_complete) {
653 		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
654 			if (links_reg & IXGBE_LINKS_UP) {
655 				*link_up = true;
656 				break;
657 			} else {
658 				*link_up = false;
659 			}
660 			msec_delay(100);
661 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
662 		}
663 	} else {
664 		if (links_reg & IXGBE_LINKS_UP)
665 			*link_up = true;
666 		else
667 			*link_up = false;
668 	}
669 
670 	if (links_reg & IXGBE_LINKS_SPEED)
671 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
672 	else
673 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
674 
675 	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
676 	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
677 		*link_up = false;
678 
679 	/* if link is down, zero out the current_mode */
680 	if (*link_up == false) {
681 		hw->fc.current_mode = ixgbe_fc_none;
682 		hw->fc.fc_was_autonegged = false;
683 	}
684 
685 out:
686 	return (IXGBE_SUCCESS);
687 }
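
/*
 * Illustrative usage, polling the link state once without waiting:
 *
 *	ixgbe_link_speed speed;
 *	bool link_up;
 *
 *	(void) hw->mac.ops.check_link(hw, &speed, &link_up, false);
 */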
688 
689 /*
690  * ixgbe_setup_mac_link_82598 - Set MAC link speed
691  * @hw: pointer to hardware structure
692  * @speed: new link speed
693  * @autoneg: true if autonegotiation enabled
694  * @autoneg_wait_to_complete: true when waiting for completion is needed
695  *
696  * Set the link speed in the AUTOC register and restarts link.
697  */
698 static s32
699 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
700     ixgbe_link_speed speed, bool autoneg,
701     bool autoneg_wait_to_complete)
702 {
703 	s32 status = IXGBE_SUCCESS;
704 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
705 	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
706 	u32 autoc = curr_autoc;
707 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
708 
709 	DEBUGFUNC("ixgbe_setup_mac_link_82598");
710 
711 	/* Check to see if speed passed in is supported. */
712 	(void) ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
713 	speed &= link_capabilities;
714 
715 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
716 		status = IXGBE_ERR_LINK_SETUP;
717 	} else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
718 	    link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
719 		/* Set KX4/KX support according to speed requested */
720 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
721 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
722 			autoc |= IXGBE_AUTOC_KX4_SUPP;
723 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
724 			autoc |= IXGBE_AUTOC_KX_SUPP;
725 		if (autoc != curr_autoc)
726 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
727 	}
728 
729 	if (status == IXGBE_SUCCESS) {
730 		/*
731 		 * Setup and restart the link based on the new values in
732 		 * ixgbe_hw.  This will write the AUTOC register based on the
733 		 * new stored values.
734 		 */
735 		status = ixgbe_start_mac_link_82598(hw,
736 		    autoneg_wait_to_complete);
737 	}
738 
739 	return (status);
740 }
741 
742 /*
743  * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
744  * @hw: pointer to hardware structure
745  * @speed: new link speed
746  * @autoneg: true if autonegotiation enabled
747  * @autoneg_wait_to_complete: true if waiting is needed to complete
748  *
749  * Sets the link speed in the AUTOC register in the MAC and restarts link.
750  */
751 static s32
752 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
753     ixgbe_link_speed speed,
754     bool autoneg,
755     bool autoneg_wait_to_complete)
756 {
757 	s32 status;
758 
759 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
760 
761 	/* Setup the PHY according to input speed */
762 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
763 	    autoneg_wait_to_complete);
764 
765 	/* Set up MAC */
766 	(void) ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
767 
768 	return (status);
769 }
770 
771 /*
772  * ixgbe_reset_hw_82598 - Performs hardware reset
773  * @hw: pointer to hardware structure
774  *
775  * Resets the hardware by resetting the transmit and receive units,
776  * masking and clearing all interrupts, performing a PHY reset, and
777  * performing a link (MAC) reset.
778  */
779 static s32
780 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
781 {
782 	s32 status = IXGBE_SUCCESS;
783 	s32 phy_status = IXGBE_SUCCESS;
784 	u32 ctrl;
785 	u32 gheccr;
786 	u32 i;
787 	u32 autoc;
788 	u8  analog_val;
789 
790 	DEBUGFUNC("ixgbe_reset_hw_82598");
791 
792 	/* Call adapter stop to disable tx/rx and clear interrupts */
793 	hw->mac.ops.stop_adapter(hw);
794 
795 	/*
796 	 * Power up the Atlas Tx lanes if they are currently powered down.
797 	 * Atlas Tx lanes are powered down for MAC loopback tests, but
798 	 * they are not automatically restored on reset.
799 	 */
800 	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
801 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
802 		/* Enable Tx Atlas so packets can be transmitted again */
803 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
804 		    &analog_val);
805 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
806 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
807 		    analog_val);
808 
809 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
810 		    &analog_val);
811 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
812 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
813 		    analog_val);
814 
815 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
816 		    &analog_val);
817 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
818 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
819 		    analog_val);
820 
821 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
822 		    &analog_val);
823 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
824 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
825 		    analog_val);
826 	}
827 
828 	/* Reset PHY */
829 	if (hw->phy.reset_disable == false) {
830 		/* PHY ops must be identified and initialized prior to reset */
831 
832 		/* Init PHY and function pointers, perform SFP setup */
833 		phy_status = hw->phy.ops.init(hw);
834 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
835 			goto reset_hw_out;
836 		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
837 			goto no_phy_reset;
838 
839 		hw->phy.ops.reset(hw);
840 	}
841 
842 no_phy_reset:
843 	/*
844 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
845 	 * access and verifying there are no pending requests before the reset
846 	 */
847 	(void) ixgbe_disable_pcie_master(hw);
848 
849 mac_reset_top:
850 
851 	/*
852 	 * Issue global reset to the MAC.  This needs to be a SW reset.
853 	 * If link reset is used, it might reset the MAC when mng is using it.
854 	 */
855 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
856 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
857 	IXGBE_WRITE_FLUSH(hw);
858 
859 	/* Poll for reset bit to self-clear indicating reset is complete */
860 	for (i = 0; i < 10; i++) {
861 		usec_delay(1);
862 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
863 		if (!(ctrl & IXGBE_CTRL_RST))
864 			break;
865 	}
866 	if (ctrl & IXGBE_CTRL_RST) {
867 		status = IXGBE_ERR_RESET_FAILED;
868 		DEBUGOUT("Reset polling failed to complete.\n");
869 	}
870 
871 	/*
872 	 * Double resets are required for recovery from certain error
873 	 * conditions.  Between resets, it is necessary to stall to allow time
874 	 * for any pending HW events to complete.  We use 1usec since that is
875 	 * what is needed for ixgbe_disable_pcie_master().  The second reset
876 	 * then clears out any effects of those events.
877 	 */
878 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
879 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
880 		usec_delay(1);
881 		goto mac_reset_top;
882 	}
883 	msec_delay(50);
884 
885 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
886 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
887 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
888 
889 	/*
890 	 * Store the original AUTOC value if it has not been
891 	 * stored off yet.  Otherwise restore the stored original
892 	 * AUTOC value, since the reset operation sets it back to defaults.
893 	 */
894 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
895 	if (hw->mac.orig_link_settings_stored == false) {
896 		hw->mac.orig_autoc = autoc;
897 		hw->mac.orig_link_settings_stored = true;
898 	} else if (autoc != hw->mac.orig_autoc) {
899 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
900 	}
901 
902 	/* Store the permanent mac address */
903 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
904 
905 	/*
906 	 * Store MAC address from RAR0, clear receive address registers, and
907 	 * clear the multicast table
908 	 */
909 	hw->mac.ops.init_rx_addrs(hw);
910 
911 reset_hw_out:
912 	if (phy_status != IXGBE_SUCCESS)
913 		status = phy_status;
914 
915 	return (status);
916 }
917 
918 /*
919  * ixgbe_set_vmdq_82598 - Associate a VMDq set index with an rx address
920  * @hw: pointer to hardware struct
921  * @rar: receive address register index to associate with a VMDq index
922  * @vmdq: VMDq set index
923  */
924 s32
925 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
926 {
927 	u32 rar_high;
928 
929 	DEBUGFUNC("ixgbe_set_vmdq_82598");
930 
931 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
932 	rar_high &= ~IXGBE_RAH_VIND_MASK;
933 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
934 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
935 	return (IXGBE_SUCCESS);
936 }
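
/*
 * Illustrative usage, steering the address in RAR entry 1 to VMDq
 * pool 2 (both index values are examples only):
 *
 *	(void) ixgbe_set_vmdq_82598(hw, 1, 2);
 */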
937 
938 /*
939  * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
940  * @hw: pointer to hardware struct
941  * @rar: receive address register index to associate with a VMDq index
942  * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
943  */
944 static s32
945 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
946 {
947 	u32 rar_high;
948 	u32 rar_entries = hw->mac.num_rar_entries;
949 
950 	UNREFERENCED_PARAMETER(vmdq);
951 
952 	if (rar < rar_entries) {
953 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
954 		if (rar_high & IXGBE_RAH_VIND_MASK) {
955 			rar_high &= ~IXGBE_RAH_VIND_MASK;
956 			IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
957 		}
958 	} else {
959 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
960 	}
961 
962 	return (IXGBE_SUCCESS);
963 }
964 
965 /*
966  * ixgbe_set_vfta_82598 - Set VLAN filter table
967  * @hw: pointer to hardware structure
968  * @vlan: VLAN id to write to VLAN filter
969  * @vind: VMDq output index that maps queue to VLAN id in VFTA
970  * @vlan_on: boolean flag to turn on/off VLAN in VFTA
971  *
972  * Turn on/off specified VLAN in the VLAN filter table.
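 *
 * For example, VLAN id 100 maps to bit 4 (100 & 0x1F) of VFTA[3]
 * (100 >> 5), and its VMDq queue index occupies bits 19:16 of
 * VFTAVIND(0, 3), since (100 >> 3) & 0x3 is 0 and (100 & 0x7) << 2 is 16.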
973  */
974 s32
975 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
976 {
977 	u32 regindex;
978 	u32 bitindex;
979 	u32 bits;
980 	u32 vftabyte;
981 
982 	DEBUGFUNC("ixgbe_set_vfta_82598");
983 
984 	if (vlan > 4095)
985 		return (IXGBE_ERR_PARAM);
986 
987 	/* Determine 32-bit word position in array */
988 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
989 
990 	/* Determine the location of the (VMD) queue index */
991 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
992 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
993 
994 	/* Set the nibble for VMD queue index */
995 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
996 	bits &= (~(0x0F << bitindex));
997 	bits |= (vind << bitindex);
998 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
999 
1000 	/* Determine the location of the bit for this VLAN id */
1001 	bitindex = vlan & 0x1F;   /* lower five bits */
1002 
1003 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1004 	if (vlan_on)
1005 		/* Turn on this VLAN id */
1006 		bits |= (1 << bitindex);
1007 	else
1008 		/* Turn off this VLAN id */
1009 		bits &= ~(1 << bitindex);
1010 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1011 
1012 	return (IXGBE_SUCCESS);
1013 }
1014 
1015 /*
1016  * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1017  * @hw: pointer to hardware structure
1018  *
1019  * Clears the VLAN filter table, and the VMDq index associated with the filter
1020  */
1021 static s32
1022 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1023 {
1024 	u32 offset;
1025 	u32 vlanbyte;
1026 
1027 	DEBUGFUNC("ixgbe_clear_vfta_82598");
1028 
1029 	for (offset = 0; offset < hw->mac.vft_size; offset++)
1030 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1031 
1032 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1033 		for (offset = 0; offset < hw->mac.vft_size; offset++)
1034 			IXGBE_WRITE_REG(hw,
1035 			    IXGBE_VFTAVIND(vlanbyte, offset), 0);
1036 
1037 	return (IXGBE_SUCCESS);
1038 }
1039 
1040 /*
1041  * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1042  * @hw: pointer to hardware structure
1043  * @reg: analog register to read
1044  * @val: read value
1045  *
1046  * Performs read operation to Atlas analog register specified.
1047  */
1048 s32
1049 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1050 {
1051 	u32  atlas_ctl;
1052 
1053 	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1054 
1055 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1056 	    IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1057 	IXGBE_WRITE_FLUSH(hw);
1058 	usec_delay(10);
1059 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1060 	*val = (u8)atlas_ctl;
1061 
1062 	return (IXGBE_SUCCESS);
1063 }
1064 
1065 /*
1066  * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1067  * @hw: pointer to hardware structure
1068  * @reg: atlas register to write
1069  * @val: value to write
1070  *
1071  * Performs write operation to Atlas analog register specified.
1072  */
1073 s32
1074 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1075 {
1076 	u32  atlas_ctl;
1077 
1078 	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1079 
1080 	atlas_ctl = (reg << 8) | val;
1081 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1082 	IXGBE_WRITE_FLUSH(hw);
1083 	usec_delay(10);
1084 
1085 	return (IXGBE_SUCCESS);
1086 }
1087 
1088 /*
1089  * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1090  * @hw: pointer to hardware structure
1091  * @byte_offset: EEPROM byte offset to read
1092  * @eeprom_data: value read
1093  *
1094  * Performs an 8 bit read operation from the SFP module's EEPROM over the
1095  * I2C interface.
1095  */
1096 s32
1097 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1098     u8 *eeprom_data)
1099 {
1100 	s32 status = IXGBE_SUCCESS;
1101 	u16 sfp_addr = 0;
1102 	u16 sfp_data = 0;
1103 	u16 sfp_stat = 0;
1104 	u32 i;
1105 
1106 	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1107 
1108 	if (hw->phy.type == ixgbe_phy_nl) {
1109 		/*
1110 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1111 		 * 0xC30D. These registers are used to talk to the SFP+
1112 		 * module's EEPROM through the SDA/SCL (I2C) interface.
1113 		 */
1114 		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1115 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1116 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1117 		    IXGBE_MDIO_PMA_PMD_DEV_TYPE, sfp_addr);
1118 
1119 		/* Poll status */
1120 		for (i = 0; i < 100; i++) {
1121 			hw->phy.ops.read_reg(hw,
1122 			    IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1123 			    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_stat);
1124 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1125 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1126 				break;
1127 			msec_delay(10);
1128 		}
1129 
1130 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1131 			DEBUGOUT("EEPROM read did not pass.\n");
1132 			status = IXGBE_ERR_SFP_NOT_PRESENT;
1133 			goto out;
1134 		}
1135 
1136 		/* Read data */
1137 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1138 		    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1139 
1140 		*eeprom_data = (u8)(sfp_data >> 8);
1141 	} else {
1142 		status = IXGBE_ERR_PHY;
1143 		goto out;
1144 	}
1145 
1146 out:
1147 	return (status);
1148 }
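
/*
 * Illustrative usage, reading the SFF identifier byte at offset 0 of the
 * module EEPROM (this assumes the IXGBE_SFF_IDENTIFIER offset define from
 * ixgbe_phy.h):
 *
 *	u8 id;
 *
 *	if (ixgbe_read_i2c_eeprom_82598(hw, IXGBE_SFF_IDENTIFIER, &id) ==
 *	    IXGBE_SUCCESS)
 *		DEBUGOUT1("SFP+ identifier: 0x%x\n", id);
 */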
1149 
1150 /*
1151  * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1152  * @hw: pointer to hardware structure
1153  *
1154  * Determines physical layer capabilities of the current configuration.
1155  */
1156 u32
1157 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1158 {
1159 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1160 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1161 	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1162 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1163 	u16 ext_ability = 0;
1164 
1165 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1166 
1167 	hw->phy.ops.identify(hw);
1168 
1169 	/*
1170 	 * Copper PHY must be checked before AUTOC LMS to determine correct
1171 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX
1172 	 */
1173 	if (hw->phy.type == ixgbe_phy_tn ||
1174 	    hw->phy.type == ixgbe_phy_cu_unknown) {
1175 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1176 		    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1177 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1178 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1179 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1180 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1181 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1182 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1183 		goto out;
1184 	}
1185 
1186 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1187 	case IXGBE_AUTOC_LMS_1G_AN:
1188 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1189 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1190 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1191 		else
1192 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1193 		break;
1194 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1195 		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1196 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1197 		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1198 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1199 		else /* XAUI */
1200 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1201 		break;
1202 	case IXGBE_AUTOC_LMS_KX4_AN:
1203 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1204 		if (autoc & IXGBE_AUTOC_KX_SUPP)
1205 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1206 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
1207 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1208 		break;
1209 	default:
1210 		break;
1211 	}
1212 
1213 	if (hw->phy.type == ixgbe_phy_nl) {
1214 		hw->phy.ops.identify_sfp(hw);
1215 
1216 		switch (hw->phy.sfp_type) {
1217 		case ixgbe_sfp_type_da_cu:
1218 			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1219 			break;
1220 		case ixgbe_sfp_type_sr:
1221 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1222 			break;
1223 		case ixgbe_sfp_type_lr:
1224 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1225 			break;
1226 		default:
1227 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1228 			break;
1229 		}
1230 	}
1231 
1232 	switch (hw->device_id) {
1233 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1234 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1235 		break;
1236 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1237 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1238 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1239 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1240 		break;
1241 	case IXGBE_DEV_ID_82598EB_XF_LR:
1242 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1243 		break;
1244 	default:
1245 		break;
1246 	}
1247 
1248 out:
1249 	return (physical_layer);
1250 }
1251 
1252 /*
1253  * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1254  * port devices.
1255  * @hw: pointer to the HW structure
1256  *
1257  * Calls the common function and corrects an issue with some single port
1258  * devices that enable LAN1 but not LAN0.
1259  */
1260 void
1261 ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1262 {
1263 	struct ixgbe_bus_info *bus = &hw->bus;
1264 	u16 pci_gen, pci_ctrl2;
1265 
1266 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1267 
1268 	ixgbe_set_lan_id_multi_port_pcie(hw);
1269 
1270 	/* check if LAN0 is disabled */
1271 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1272 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1273 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1274 
1275 		/* if LAN0 is completely disabled force function to 0 */
1276 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1277 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1278 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1279 			bus->func = 0;
1280 		}
1281 	}
1282 }
1283 
1284 /*
1285  * ixgbe_validate_link_ready - Function looks for phy link
1286  * @hw: pointer to hardware structure
1287  *
1288  * Indicates success when the PHY link is available.  If the PHY is not ready
1289  * within 5 seconds of the MAC indicating link, the function returns an error.
1290  */
1291 static s32
1292 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
1293 {
1294 	u32 timeout;
1295 	u16 an_reg;
1296 
1297 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
1298 		return (IXGBE_SUCCESS);
1299 
1300 	for (timeout = 0;
1301 	    timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
1302 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1303 		    IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
1304 
1305 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
1306 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
1307 			break;
1308 
1309 		msec_delay(100);
1310 	}
1311 
1312 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
1313 		DEBUGOUT("Link was indicated but link is down\n");
1314 		return (IXGBE_ERR_LINK_SETUP);
1315 	}
1316 
1317 	return (IXGBE_SUCCESS);
1318 }
1319 
1320 /*
1321  * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1322  * @hw: pointer to hardware structure
1323  */
1324 void
1325 ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1326 {
1327 	u32 regval;
1328 	u32 i;
1329 
1330 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1331 
1332 	/* Enable relaxed ordering */
1333 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
1334 	    (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1335 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1336 		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1337 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1338 	}
1339 
1340 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
1341 	    (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1342 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1343 		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1344 		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1345 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1346 	}
1347 }
1348