1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29/* $FreeBSD$ */
30#include "if_em.h"
31#include <sys/sbuf.h>
32#include <machine/_inttypes.h>
33
34#define em_mac_min e1000_82547
35#define igb_mac_min e1000_82575
36
37/*********************************************************************
38 *  Driver version:
39 *********************************************************************/
40char em_driver_version[] = "7.6.1-k";
41
42/*********************************************************************
43 *  PCI Device ID Table
44 *
45 *  Used by probe to select devices to load on
46 *  Last field stores an index into e1000_strings
47 *  Last entry must be all 0s
48 *
49 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
50 *********************************************************************/
51
52static pci_vendor_info_t em_vendor_info_array[] =
53{
54	/* Intel(R) PRO/1000 Network Connection - Legacy em*/
55	PVID(0x8086, E1000_DEV_ID_82540EM, "Intel(R) PRO/1000 Network Connection"),
56	PVID(0x8086, E1000_DEV_ID_82540EM_LOM, "Intel(R) PRO/1000 Network Connection"),
57	PVID(0x8086, E1000_DEV_ID_82540EP, "Intel(R) PRO/1000 Network Connection"),
58	PVID(0x8086, E1000_DEV_ID_82540EP_LOM, "Intel(R) PRO/1000 Network Connection"),
59	PVID(0x8086, E1000_DEV_ID_82540EP_LP, "Intel(R) PRO/1000 Network Connection"),
60
61	PVID(0x8086, E1000_DEV_ID_82541EI, "Intel(R) PRO/1000 Network Connection"),
62	PVID(0x8086, E1000_DEV_ID_82541ER, "Intel(R) PRO/1000 Network Connection"),
63	PVID(0x8086, E1000_DEV_ID_82541ER_LOM, "Intel(R) PRO/1000 Network Connection"),
64	PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
65	PVID(0x8086, E1000_DEV_ID_82541GI, "Intel(R) PRO/1000 Network Connection"),
66	PVID(0x8086, E1000_DEV_ID_82541GI_LF, "Intel(R) PRO/1000 Network Connection"),
67	PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
68
69	PVID(0x8086, E1000_DEV_ID_82542, "Intel(R) PRO/1000 Network Connection"),
70
71	PVID(0x8086, E1000_DEV_ID_82543GC_FIBER, "Intel(R) PRO/1000 Network Connection"),
72	PVID(0x8086, E1000_DEV_ID_82543GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
73
74	PVID(0x8086, E1000_DEV_ID_82544EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
75	PVID(0x8086, E1000_DEV_ID_82544EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
76	PVID(0x8086, E1000_DEV_ID_82544GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
77	PVID(0x8086, E1000_DEV_ID_82544GC_LOM, "Intel(R) PRO/1000 Network Connection"),
78
79	PVID(0x8086, E1000_DEV_ID_82545EM_COPPER, "Intel(R) PRO/1000 Network Connection"),
80	PVID(0x8086, E1000_DEV_ID_82545EM_FIBER, "Intel(R) PRO/1000 Network Connection"),
81	PVID(0x8086, E1000_DEV_ID_82545GM_COPPER, "Intel(R) PRO/1000 Network Connection"),
82	PVID(0x8086, E1000_DEV_ID_82545GM_FIBER, "Intel(R) PRO/1000 Network Connection"),
83	PVID(0x8086, E1000_DEV_ID_82545GM_SERDES, "Intel(R) PRO/1000 Network Connection"),
84
85	PVID(0x8086, E1000_DEV_ID_82546EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
86	PVID(0x8086, E1000_DEV_ID_82546EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
87	PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
88	PVID(0x8086, E1000_DEV_ID_82546GB_COPPER, "Intel(R) PRO/1000 Network Connection"),
89	PVID(0x8086, E1000_DEV_ID_82546GB_FIBER, "Intel(R) PRO/1000 Network Connection"),
90	PVID(0x8086, E1000_DEV_ID_82546GB_SERDES, "Intel(R) PRO/1000 Network Connection"),
91	PVID(0x8086, E1000_DEV_ID_82546GB_PCIE, "Intel(R) PRO/1000 Network Connection"),
92	PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
93	PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, "Intel(R) PRO/1000 Network Connection"),
94
95	PVID(0x8086, E1000_DEV_ID_82547EI, "Intel(R) PRO/1000 Network Connection"),
96	PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
97	PVID(0x8086, E1000_DEV_ID_82547GI, "Intel(R) PRO/1000 Network Connection"),
98
99	/* Intel(R) PRO/1000 Network Connection - em */
100	PVID(0x8086, E1000_DEV_ID_82571EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
101	PVID(0x8086, E1000_DEV_ID_82571EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
102	PVID(0x8086, E1000_DEV_ID_82571EB_SERDES, "Intel(R) PRO/1000 Network Connection"),
103	PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, "Intel(R) PRO/1000 Network Connection"),
104	PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, "Intel(R) PRO/1000 Network Connection"),
105	PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
106	PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, "Intel(R) PRO/1000 Network Connection"),
107	PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, "Intel(R) PRO/1000 Network Connection"),
108	PVID(0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
109	PVID(0x8086, E1000_DEV_ID_82572EI, "Intel(R) PRO/1000 Network Connection"),
110	PVID(0x8086, E1000_DEV_ID_82572EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
111	PVID(0x8086, E1000_DEV_ID_82572EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
112	PVID(0x8086, E1000_DEV_ID_82572EI_SERDES, "Intel(R) PRO/1000 Network Connection"),
113	PVID(0x8086, E1000_DEV_ID_82573E, "Intel(R) PRO/1000 Network Connection"),
114	PVID(0x8086, E1000_DEV_ID_82573E_IAMT, "Intel(R) PRO/1000 Network Connection"),
115	PVID(0x8086, E1000_DEV_ID_82573L, "Intel(R) PRO/1000 Network Connection"),
116	PVID(0x8086, E1000_DEV_ID_82583V, "Intel(R) PRO/1000 Network Connection"),
117	PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, "Intel(R) PRO/1000 Network Connection"),
118	PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, "Intel(R) PRO/1000 Network Connection"),
119	PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, "Intel(R) PRO/1000 Network Connection"),
120	PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, "Intel(R) PRO/1000 Network Connection"),
121	PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
122	PVID(0x8086, E1000_DEV_ID_ICH8_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
123	PVID(0x8086, E1000_DEV_ID_ICH8_IGP_C, "Intel(R) PRO/1000 Network Connection"),
124	PVID(0x8086, E1000_DEV_ID_ICH8_IFE, "Intel(R) PRO/1000 Network Connection"),
125	PVID(0x8086, E1000_DEV_ID_ICH8_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
126	PVID(0x8086, E1000_DEV_ID_ICH8_IFE_G, "Intel(R) PRO/1000 Network Connection"),
127	PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M, "Intel(R) PRO/1000 Network Connection"),
128	PVID(0x8086, E1000_DEV_ID_ICH8_82567V_3, "Intel(R) PRO/1000 Network Connection"),
129	PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
130	PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
131	PVID(0x8086, E1000_DEV_ID_ICH9_IGP_C, "Intel(R) PRO/1000 Network Connection"),
132	PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M, "Intel(R) PRO/1000 Network Connection"),
133	PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_V, "Intel(R) PRO/1000 Network Connection"),
134	PVID(0x8086, E1000_DEV_ID_ICH9_IFE, "Intel(R) PRO/1000 Network Connection"),
135	PVID(0x8086, E1000_DEV_ID_ICH9_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
136	PVID(0x8086, E1000_DEV_ID_ICH9_IFE_G, "Intel(R) PRO/1000 Network Connection"),
137	PVID(0x8086, E1000_DEV_ID_ICH9_BM, "Intel(R) PRO/1000 Network Connection"),
138	PVID(0x8086, E1000_DEV_ID_82574L, "Intel(R) PRO/1000 Network Connection"),
139	PVID(0x8086, E1000_DEV_ID_82574LA, "Intel(R) PRO/1000 Network Connection"),
140	PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LM, "Intel(R) PRO/1000 Network Connection"),
141	PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LF, "Intel(R) PRO/1000 Network Connection"),
142	PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_V, "Intel(R) PRO/1000 Network Connection"),
143	PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LM, "Intel(R) PRO/1000 Network Connection"),
144	PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LF, "Intel(R) PRO/1000 Network Connection"),
145	PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_V, "Intel(R) PRO/1000 Network Connection"),
146	PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LM, "Intel(R) PRO/1000 Network Connection"),
147	PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LC, "Intel(R) PRO/1000 Network Connection"),
148	PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DM, "Intel(R) PRO/1000 Network Connection"),
149	PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DC, "Intel(R) PRO/1000 Network Connection"),
150	PVID(0x8086, E1000_DEV_ID_PCH2_LV_LM, "Intel(R) PRO/1000 Network Connection"),
151	PVID(0x8086, E1000_DEV_ID_PCH2_LV_V, "Intel(R) PRO/1000 Network Connection"),
152	PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, "Intel(R) PRO/1000 Network Connection"),
153	PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_V, "Intel(R) PRO/1000 Network Connection"),
154	PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, "Intel(R) PRO/1000 Network Connection"),
155	PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, "Intel(R) PRO/1000 Network Connection"),
156	PVID(0x8086, E1000_DEV_ID_PCH_I218_LM2, "Intel(R) PRO/1000 Network Connection"),
157	PVID(0x8086, E1000_DEV_ID_PCH_I218_V2, "Intel(R) PRO/1000 Network Connection"),
158	PVID(0x8086, E1000_DEV_ID_PCH_I218_LM3, "Intel(R) PRO/1000 Network Connection"),
159	PVID(0x8086, E1000_DEV_ID_PCH_I218_V3, "Intel(R) PRO/1000 Network Connection"),
160	PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, "Intel(R) PRO/1000 Network Connection"),
161	PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V, "Intel(R) PRO/1000 Network Connection"),
162	PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, "Intel(R) PRO/1000 Network Connection"),
163	PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, "Intel(R) PRO/1000 Network Connection"),
164	PVID(0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, "Intel(R) PRO/1000 Network Connection"),
165	PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM4, "Intel(R) PRO/1000 Network Connection"),
166	PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V4, "Intel(R) PRO/1000 Network Connection"),
167	PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM5, "Intel(R) PRO/1000 Network Connection"),
168	PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V5, "Intel(R) PRO/1000 Network Connection"),
169	PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM6, "Intel(R) PRO/1000 Network Connection"),
170	PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_V6, "Intel(R) PRO/1000 Network Connection"),
171	PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM7, "Intel(R) PRO/1000 Network Connection"),
172	PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_V7, "Intel(R) PRO/1000 Network Connection"),
173	PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM8, "Intel(R) PRO/1000 Network Connection"),
174	PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_V8, "Intel(R) PRO/1000 Network Connection"),
175	PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM9, "Intel(R) PRO/1000 Network Connection"),
176	PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_V9, "Intel(R) PRO/1000 Network Connection"),
177	PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM10, "Intel(R) PRO/1000 Network Connection"),
178	PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V10, "Intel(R) PRO/1000 Network Connection"),
179	PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM11, "Intel(R) PRO/1000 Network Connection"),
180	PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V11, "Intel(R) PRO/1000 Network Connection"),
181	PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM12, "Intel(R) PRO/1000 Network Connection"),
182	PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V12, "Intel(R) PRO/1000 Network Connection"),
183	/* required last entry */
184	PVID_END
185};
186
187static pci_vendor_info_t igb_vendor_info_array[] =
188{
189	/* Intel(R) PRO/1000 Network Connection - igb */
190	PVID(0x8086, E1000_DEV_ID_82575EB_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
191	PVID(0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
192	PVID(0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
193	PVID(0x8086, E1000_DEV_ID_82576, "Intel(R) PRO/1000 PCI-Express Network Driver"),
194	PVID(0x8086, E1000_DEV_ID_82576_NS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
195	PVID(0x8086, E1000_DEV_ID_82576_NS_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
196	PVID(0x8086, E1000_DEV_ID_82576_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
197	PVID(0x8086, E1000_DEV_ID_82576_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
198	PVID(0x8086, E1000_DEV_ID_82576_SERDES_QUAD, "Intel(R) PRO/1000 PCI-Express Network Driver"),
199	PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
200	PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, "Intel(R) PRO/1000 PCI-Express Network Driver"),
201	PVID(0x8086, E1000_DEV_ID_82576_VF, "Intel(R) PRO/1000 PCI-Express Network Driver"),
202	PVID(0x8086, E1000_DEV_ID_82580_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
203	PVID(0x8086, E1000_DEV_ID_82580_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
204	PVID(0x8086, E1000_DEV_ID_82580_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
205	PVID(0x8086, E1000_DEV_ID_82580_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
206	PVID(0x8086, E1000_DEV_ID_82580_COPPER_DUAL, "Intel(R) PRO/1000 PCI-Express Network Driver"),
207	PVID(0x8086, E1000_DEV_ID_82580_QUAD_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
208	PVID(0x8086, E1000_DEV_ID_DH89XXCC_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
209	PVID(0x8086, E1000_DEV_ID_DH89XXCC_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
210	PVID(0x8086, E1000_DEV_ID_DH89XXCC_SFP, "Intel(R) PRO/1000 PCI-Express Network Driver"),
211	PVID(0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, "Intel(R) PRO/1000 PCI-Express Network Driver"),
212	PVID(0x8086, E1000_DEV_ID_I350_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
213	PVID(0x8086, E1000_DEV_ID_I350_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
214	PVID(0x8086, E1000_DEV_ID_I350_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
215	PVID(0x8086, E1000_DEV_ID_I350_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
216	PVID(0x8086, E1000_DEV_ID_I350_VF, "Intel(R) PRO/1000 PCI-Express Network Driver"),
217	PVID(0x8086, E1000_DEV_ID_I210_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
218	PVID(0x8086, E1000_DEV_ID_I210_COPPER_IT, "Intel(R) PRO/1000 PCI-Express Network Driver"),
219	PVID(0x8086, E1000_DEV_ID_I210_COPPER_OEM1, "Intel(R) PRO/1000 PCI-Express Network Driver"),
220	PVID(0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
221	PVID(0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
222	PVID(0x8086, E1000_DEV_ID_I210_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
223	PVID(0x8086, E1000_DEV_ID_I210_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
224	PVID(0x8086, E1000_DEV_ID_I210_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
225	PVID(0x8086, E1000_DEV_ID_I211_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
226	PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
227	PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
228	PVID(0x8086, E1000_DEV_ID_I354_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
229	/* required last entry */
230	PVID_END
231};
232
233/*********************************************************************
234 *  Function prototypes
235 *********************************************************************/
236static void	*em_register(device_t dev);
237static void	*igb_register(device_t dev);
238static int	em_if_attach_pre(if_ctx_t ctx);
239static int	em_if_attach_post(if_ctx_t ctx);
240static int	em_if_detach(if_ctx_t ctx);
241static int	em_if_shutdown(if_ctx_t ctx);
242static int	em_if_suspend(if_ctx_t ctx);
243static int	em_if_resume(if_ctx_t ctx);
244
245static int	em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
246static int	em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets);
247static void	em_if_queues_free(if_ctx_t ctx);
248
249static uint64_t	em_if_get_counter(if_ctx_t, ift_counter);
250static void	em_if_init(if_ctx_t ctx);
251static void	em_if_stop(if_ctx_t ctx);
252static void	em_if_media_status(if_ctx_t, struct ifmediareq *);
253static int	em_if_media_change(if_ctx_t ctx);
254static int	em_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
255static void	em_if_timer(if_ctx_t ctx, uint16_t qid);
256static void	em_if_vlan_register(if_ctx_t ctx, u16 vtag);
257static void	em_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
258static void	em_if_watchdog_reset(if_ctx_t ctx);
259static bool	em_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
260
261static void	em_identify_hardware(if_ctx_t ctx);
262static int	em_allocate_pci_resources(if_ctx_t ctx);
263static void	em_free_pci_resources(if_ctx_t ctx);
264static void	em_reset(if_ctx_t ctx);
265static int	em_setup_interface(if_ctx_t ctx);
266static int	em_setup_msix(if_ctx_t ctx);
267
268static void	em_initialize_transmit_unit(if_ctx_t ctx);
269static void	em_initialize_receive_unit(if_ctx_t ctx);
270
271static void	em_if_intr_enable(if_ctx_t ctx);
272static void	em_if_intr_disable(if_ctx_t ctx);
273static void	igb_if_intr_enable(if_ctx_t ctx);
274static void	igb_if_intr_disable(if_ctx_t ctx);
275static int	em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
276static int	em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
277static int	igb_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
278static int	igb_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
279static void	em_if_multi_set(if_ctx_t ctx);
280static void	em_if_update_admin_status(if_ctx_t ctx);
281static void	em_if_debug(if_ctx_t ctx);
282static void	em_update_stats_counters(struct adapter *);
283static void	em_add_hw_stats(struct adapter *adapter);
284static int	em_if_set_promisc(if_ctx_t ctx, int flags);
285static void	em_setup_vlan_hw_support(struct adapter *);
286static int	em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
287static void	em_print_nvm_info(struct adapter *);
288static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
289static int	em_get_rs(SYSCTL_HANDLER_ARGS);
290static void	em_print_debug_info(struct adapter *);
291static int 	em_is_valid_ether_addr(u8 *);
292static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
293static void	em_add_int_delay_sysctl(struct adapter *, const char *,
294		    const char *, struct em_int_delay_info *, int, int);
295/* Management and WOL Support */
296static void	em_init_manageability(struct adapter *);
297static void	em_release_manageability(struct adapter *);
298static void	em_get_hw_control(struct adapter *);
299static void	em_release_hw_control(struct adapter *);
300static void	em_get_wakeup(if_ctx_t ctx);
301static void	em_enable_wakeup(if_ctx_t ctx);
302static int	em_enable_phy_wakeup(struct adapter *);
303static void	em_disable_aspm(struct adapter *);
304
305int		em_intr(void *arg);
306static void	em_disable_promisc(if_ctx_t ctx);
307
308/* MSI-X handlers */
309static int	em_if_msix_intr_assign(if_ctx_t, int);
310static int	em_msix_link(void *);
311static void	em_handle_link(void *context);
312
313static void	em_enable_vectors_82574(if_ctx_t);
314
315static int	em_set_flowcntl(SYSCTL_HANDLER_ARGS);
316static int	em_sysctl_eee(SYSCTL_HANDLER_ARGS);
317static void	em_if_led_func(if_ctx_t ctx, int onoff);
318
319static int	em_get_regs(SYSCTL_HANDLER_ARGS);
320
321static void	lem_smartspeed(struct adapter *adapter);
322static void	igb_configure_queues(struct adapter *adapter);
323
324
325/*********************************************************************
326 *  FreeBSD Device Interface Entry Points
327 *********************************************************************/
328static device_method_t em_methods[] = {
329	/* Device interface */
330	DEVMETHOD(device_register, em_register),
331	DEVMETHOD(device_probe, iflib_device_probe),
332	DEVMETHOD(device_attach, iflib_device_attach),
333	DEVMETHOD(device_detach, iflib_device_detach),
334	DEVMETHOD(device_shutdown, iflib_device_shutdown),
335	DEVMETHOD(device_suspend, iflib_device_suspend),
336	DEVMETHOD(device_resume, iflib_device_resume),
337	DEVMETHOD_END
338};
339
340static device_method_t igb_methods[] = {
341	/* Device interface */
342	DEVMETHOD(device_register, igb_register),
343	DEVMETHOD(device_probe, iflib_device_probe),
344	DEVMETHOD(device_attach, iflib_device_attach),
345	DEVMETHOD(device_detach, iflib_device_detach),
346	DEVMETHOD(device_shutdown, iflib_device_shutdown),
347	DEVMETHOD(device_suspend, iflib_device_suspend),
348	DEVMETHOD(device_resume, iflib_device_resume),
349	DEVMETHOD_END
350};
351
352
353static driver_t em_driver = {
354	"em", em_methods, sizeof(struct adapter),
355};
356
357static devclass_t em_devclass;
358DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
359
360MODULE_DEPEND(em, pci, 1, 1, 1);
361MODULE_DEPEND(em, ether, 1, 1, 1);
362MODULE_DEPEND(em, iflib, 1, 1, 1);
363
364IFLIB_PNP_INFO(pci, em, em_vendor_info_array);
365
366static driver_t igb_driver = {
367	"igb", igb_methods, sizeof(struct adapter),
368};
369
370static devclass_t igb_devclass;
371DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0);
372
373MODULE_DEPEND(igb, pci, 1, 1, 1);
374MODULE_DEPEND(igb, ether, 1, 1, 1);
375MODULE_DEPEND(igb, iflib, 1, 1, 1);
376
377IFLIB_PNP_INFO(pci, igb, igb_vendor_info_array);
378
379static device_method_t em_if_methods[] = {
380	DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
381	DEVMETHOD(ifdi_attach_post, em_if_attach_post),
382	DEVMETHOD(ifdi_detach, em_if_detach),
383	DEVMETHOD(ifdi_shutdown, em_if_shutdown),
384	DEVMETHOD(ifdi_suspend, em_if_suspend),
385	DEVMETHOD(ifdi_resume, em_if_resume),
386	DEVMETHOD(ifdi_init, em_if_init),
387	DEVMETHOD(ifdi_stop, em_if_stop),
388	DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign),
389	DEVMETHOD(ifdi_intr_enable, em_if_intr_enable),
390	DEVMETHOD(ifdi_intr_disable, em_if_intr_disable),
391	DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
392	DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
393	DEVMETHOD(ifdi_queues_free, em_if_queues_free),
394	DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
395	DEVMETHOD(ifdi_multi_set, em_if_multi_set),
396	DEVMETHOD(ifdi_media_status, em_if_media_status),
397	DEVMETHOD(ifdi_media_change, em_if_media_change),
398	DEVMETHOD(ifdi_mtu_set, em_if_mtu_set),
399	DEVMETHOD(ifdi_promisc_set, em_if_set_promisc),
400	DEVMETHOD(ifdi_timer, em_if_timer),
401	DEVMETHOD(ifdi_watchdog_reset, em_if_watchdog_reset),
402	DEVMETHOD(ifdi_vlan_register, em_if_vlan_register),
403	DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister),
404	DEVMETHOD(ifdi_get_counter, em_if_get_counter),
405	DEVMETHOD(ifdi_led_func, em_if_led_func),
406	DEVMETHOD(ifdi_rx_queue_intr_enable, em_if_rx_queue_intr_enable),
407	DEVMETHOD(ifdi_tx_queue_intr_enable, em_if_tx_queue_intr_enable),
408	DEVMETHOD(ifdi_debug, em_if_debug),
409	DEVMETHOD(ifdi_needs_restart, em_if_needs_restart),
410	DEVMETHOD_END
411};
412
413static driver_t em_if_driver = {
414	"em_if", em_if_methods, sizeof(struct adapter)
415};
416
417static device_method_t igb_if_methods[] = {
418	DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
419	DEVMETHOD(ifdi_attach_post, em_if_attach_post),
420	DEVMETHOD(ifdi_detach, em_if_detach),
421	DEVMETHOD(ifdi_shutdown, em_if_shutdown),
422	DEVMETHOD(ifdi_suspend, em_if_suspend),
423	DEVMETHOD(ifdi_resume, em_if_resume),
424	DEVMETHOD(ifdi_init, em_if_init),
425	DEVMETHOD(ifdi_stop, em_if_stop),
426	DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign),
427	DEVMETHOD(ifdi_intr_enable, igb_if_intr_enable),
428	DEVMETHOD(ifdi_intr_disable, igb_if_intr_disable),
429	DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
430	DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
431	DEVMETHOD(ifdi_queues_free, em_if_queues_free),
432	DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
433	DEVMETHOD(ifdi_multi_set, em_if_multi_set),
434	DEVMETHOD(ifdi_media_status, em_if_media_status),
435	DEVMETHOD(ifdi_media_change, em_if_media_change),
436	DEVMETHOD(ifdi_mtu_set, em_if_mtu_set),
437	DEVMETHOD(ifdi_promisc_set, em_if_set_promisc),
438	DEVMETHOD(ifdi_timer, em_if_timer),
439	DEVMETHOD(ifdi_watchdog_reset, em_if_watchdog_reset),
440	DEVMETHOD(ifdi_vlan_register, em_if_vlan_register),
441	DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister),
442	DEVMETHOD(ifdi_get_counter, em_if_get_counter),
443	DEVMETHOD(ifdi_led_func, em_if_led_func),
444	DEVMETHOD(ifdi_rx_queue_intr_enable, igb_if_rx_queue_intr_enable),
445	DEVMETHOD(ifdi_tx_queue_intr_enable, igb_if_tx_queue_intr_enable),
446	DEVMETHOD(ifdi_debug, em_if_debug),
447	DEVMETHOD(ifdi_needs_restart, em_if_needs_restart),
448	DEVMETHOD_END
449};
450
451static driver_t igb_if_driver = {
452	"igb_if", igb_if_methods, sizeof(struct adapter)
453};
454
455/*********************************************************************
456 *  Tunable default values.
457 *********************************************************************/
458
459#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
460#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
461
462#define MAX_INTS_PER_SEC	8000
463#define DEFAULT_ITR		(1000000000/(MAX_INTS_PER_SEC * 256))
464
465/* Allow common code without TSO */
466#ifndef CSUM_TSO
467#define CSUM_TSO	0
468#endif
469
470static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
471    "EM driver parameters");
472
473static int em_disable_crc_stripping = 0;
474SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
475    &em_disable_crc_stripping, 0, "Disable CRC Stripping");
476
477static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
478static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
479SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt,
480    0, "Default transmit interrupt delay in usecs");
481SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
482    0, "Default receive interrupt delay in usecs");
483
484static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
485static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
486SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
487    &em_tx_abs_int_delay_dflt, 0,
488    "Default transmit interrupt delay limit in usecs");
489SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
490    &em_rx_abs_int_delay_dflt, 0,
491    "Default receive interrupt delay limit in usecs");
492
493static int em_smart_pwr_down = FALSE;
494SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
495    0, "Set to true to leave smart power down enabled on newer adapters");
496
497/* Controls whether promiscuous also shows bad packets */
498static int em_debug_sbp = TRUE;
499SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0,
500    "Show bad packets in promiscuous mode");
501
502/* How many packets rxeof tries to clean at a time */
503static int em_rx_process_limit = 100;
504SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
505    &em_rx_process_limit, 0,
506    "Maximum number of received packets to process "
507    "at a time, -1 means unlimited");
508
509/* Energy efficient ethernet - default to OFF */
510static int eee_setting = 1;
511SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
512    "Enable Energy Efficient Ethernet");
513
514/*
515** Tuneable Interrupt rate
516*/
517static int em_max_interrupt_rate = 8000;
518SYSCTL_INT(_hw_em, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
519    &em_max_interrupt_rate, 0, "Maximum interrupts per second");
520
521
522
523/* Global used in WOL setup with multiport cards */
524static int global_quad_port_a = 0;
525
526extern struct if_txrx igb_txrx;
527extern struct if_txrx em_txrx;
528extern struct if_txrx lem_txrx;
529
530static struct if_shared_ctx em_sctx_init = {
531	.isc_magic = IFLIB_MAGIC,
532	.isc_q_align = PAGE_SIZE,
533	.isc_tx_maxsize = EM_TSO_SIZE + sizeof(struct ether_vlan_header),
534	.isc_tx_maxsegsize = PAGE_SIZE,
535	.isc_tso_maxsize = EM_TSO_SIZE + sizeof(struct ether_vlan_header),
536	.isc_tso_maxsegsize = EM_TSO_SEG_SIZE,
537	.isc_rx_maxsize = MJUM9BYTES,
538	.isc_rx_nsegments = 1,
539	.isc_rx_maxsegsize = MJUM9BYTES,
540	.isc_nfl = 1,
541	.isc_nrxqs = 1,
542	.isc_ntxqs = 1,
543	.isc_admin_intrcnt = 1,
544	.isc_vendor_info = em_vendor_info_array,
545	.isc_driver_version = em_driver_version,
546	.isc_driver = &em_if_driver,
547	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
548
549	.isc_nrxd_min = {EM_MIN_RXD},
550	.isc_ntxd_min = {EM_MIN_TXD},
551	.isc_nrxd_max = {EM_MAX_RXD},
552	.isc_ntxd_max = {EM_MAX_TXD},
553	.isc_nrxd_default = {EM_DEFAULT_RXD},
554	.isc_ntxd_default = {EM_DEFAULT_TXD},
555};
556
557if_shared_ctx_t em_sctx = &em_sctx_init;
558
559static struct if_shared_ctx igb_sctx_init = {
560	.isc_magic = IFLIB_MAGIC,
561	.isc_q_align = PAGE_SIZE,
562	.isc_tx_maxsize = EM_TSO_SIZE + sizeof(struct ether_vlan_header),
563	.isc_tx_maxsegsize = PAGE_SIZE,
564	.isc_tso_maxsize = EM_TSO_SIZE + sizeof(struct ether_vlan_header),
565	.isc_tso_maxsegsize = EM_TSO_SEG_SIZE,
566	.isc_rx_maxsize = MJUM9BYTES,
567	.isc_rx_nsegments = 1,
568	.isc_rx_maxsegsize = MJUM9BYTES,
569	.isc_nfl = 1,
570	.isc_nrxqs = 1,
571	.isc_ntxqs = 1,
572	.isc_admin_intrcnt = 1,
573	.isc_vendor_info = igb_vendor_info_array,
574	.isc_driver_version = em_driver_version,
575	.isc_driver = &igb_if_driver,
576	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
577
578	.isc_nrxd_min = {EM_MIN_RXD},
579	.isc_ntxd_min = {EM_MIN_TXD},
580	.isc_nrxd_max = {IGB_MAX_RXD},
581	.isc_ntxd_max = {IGB_MAX_TXD},
582	.isc_nrxd_default = {EM_DEFAULT_RXD},
583	.isc_ntxd_default = {EM_DEFAULT_TXD},
584};
585
586if_shared_ctx_t igb_sctx = &igb_sctx_init;
587
588/*****************************************************************
589 *
590 * Dump Registers
591 *
592 ****************************************************************/
593#define IGB_REGS_LEN 739
594
595static int em_get_regs(SYSCTL_HANDLER_ARGS)
596{
597	struct adapter *adapter = (struct adapter *)arg1;
598	struct e1000_hw *hw = &adapter->hw;
599	struct sbuf *sb;
600	u32 *regs_buff;
601	int rc;
602
603	regs_buff = malloc(sizeof(u32) * IGB_REGS_LEN, M_DEVBUF, M_WAITOK);
604	memset(regs_buff, 0, IGB_REGS_LEN * sizeof(u32));
605
606	rc = sysctl_wire_old_buffer(req, 0);
607	MPASS(rc == 0);
608	if (rc != 0) {
609		free(regs_buff, M_DEVBUF);
610		return (rc);
611	}
612
613	sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
614	MPASS(sb != NULL);
615	if (sb == NULL) {
616		free(regs_buff, M_DEVBUF);
617		return (ENOMEM);
618	}
619
620	/* General Registers */
621	regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
622	regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
623	regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
624	regs_buff[3] = E1000_READ_REG(hw, E1000_ICR);
625	regs_buff[4] = E1000_READ_REG(hw, E1000_RCTL);
626	regs_buff[5] = E1000_READ_REG(hw, E1000_RDLEN(0));
627	regs_buff[6] = E1000_READ_REG(hw, E1000_RDH(0));
628	regs_buff[7] = E1000_READ_REG(hw, E1000_RDT(0));
629	regs_buff[8] = E1000_READ_REG(hw, E1000_RXDCTL(0));
630	regs_buff[9] = E1000_READ_REG(hw, E1000_RDBAL(0));
631	regs_buff[10] = E1000_READ_REG(hw, E1000_RDBAH(0));
632	regs_buff[11] = E1000_READ_REG(hw, E1000_TCTL);
633	regs_buff[12] = E1000_READ_REG(hw, E1000_TDBAL(0));
634	regs_buff[13] = E1000_READ_REG(hw, E1000_TDBAH(0));
635	regs_buff[14] = E1000_READ_REG(hw, E1000_TDLEN(0));
636	regs_buff[15] = E1000_READ_REG(hw, E1000_TDH(0));
637	regs_buff[16] = E1000_READ_REG(hw, E1000_TDT(0));
638	regs_buff[17] = E1000_READ_REG(hw, E1000_TXDCTL(0));
639	regs_buff[18] = E1000_READ_REG(hw, E1000_TDFH);
640	regs_buff[19] = E1000_READ_REG(hw, E1000_TDFT);
641	regs_buff[20] = E1000_READ_REG(hw, E1000_TDFHS);
642	regs_buff[21] = E1000_READ_REG(hw, E1000_TDFPC);
643
644	sbuf_printf(sb, "General Registers\n");
645	sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
646	sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
647	sbuf_printf(sb, "\tCTRL_EXIT\t %08x\n\n", regs_buff[2]);
648
649	sbuf_printf(sb, "Interrupt Registers\n");
650	sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);
651
652	sbuf_printf(sb, "RX Registers\n");
653	sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
654	sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
655	sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
656	sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
657	sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
658	sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
659	sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);
660
661	sbuf_printf(sb, "TX Registers\n");
662	sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
663	sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
664	sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
665	sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
666	sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
667	sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
668	sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
669	sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
670	sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
671	sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
672	sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);
673
674	free(regs_buff, M_DEVBUF);
675
676#ifdef DUMP_DESCS
677	{
678		if_softc_ctx_t scctx = adapter->shared;
679		struct rx_ring *rxr = &rx_que->rxr;
680		struct tx_ring *txr = &tx_que->txr;
681		int ntxd = scctx->isc_ntxd[0];
682		int nrxd = scctx->isc_nrxd[0];
683		int j;
684
685	for (j = 0; j < nrxd; j++) {
686		u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
687		u32 length =  le32toh(rxr->rx_base[j].wb.upper.length);
688		sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 "  Error:%d  Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length);
689	}
690
691	for (j = 0; j < min(ntxd, 256); j++) {
692		unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
693
694		sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x  eop: %d DD=%d\n",
695			    j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
696			    buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & E1000_TXD_STAT_DD : 0);
697
698	}
699	}
700#endif
701
702	rc = sbuf_finish(sb);
703	sbuf_delete(sb);
704	return(rc);
705}
706
707static void *
708em_register(device_t dev)
709{
710	return (em_sctx);
711}
712
713static void *
714igb_register(device_t dev)
715{
716	return (igb_sctx);
717}
718
719static int
720em_set_num_queues(if_ctx_t ctx)
721{
722	struct adapter *adapter = iflib_get_softc(ctx);
723	int maxqueues;
724
725	/* Sanity check based on HW */
726	switch (adapter->hw.mac.type) {
727	case e1000_82576:
728	case e1000_82580:
729	case e1000_i350:
730	case e1000_i354:
731		maxqueues = 8;
732		break;
733	case e1000_i210:
734	case e1000_82575:
735		maxqueues = 4;
736		break;
737	case e1000_i211:
738	case e1000_82574:
739		maxqueues = 2;
740		break;
741	default:
742		maxqueues = 1;
743		break;
744	}
745
746	return (maxqueues);
747}
748
749#define	LEM_CAPS							\
750    IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |		\
751    IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_VLAN_HWFILTER
752
753#define	EM_CAPS								\
754    IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |		\
755    IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_VLAN_HWFILTER | IFCAP_TSO4 |	\
756    IFCAP_LRO | IFCAP_VLAN_HWTSO
757
758#define	IGB_CAPS							\
759    IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |		\
760    IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_VLAN_HWFILTER | IFCAP_TSO4 |	\
761    IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 |\
762    IFCAP_TSO6
763
764/*********************************************************************
765 *  Device initialization routine
766 *
767 *  The attach entry point is called when the driver is being loaded.
768 *  This routine identifies the type of hardware, allocates all resources
769 *  and initializes the hardware.
770 *
771 *  return 0 on success, positive on failure
772 *********************************************************************/
773static int
774em_if_attach_pre(if_ctx_t ctx)
775{
776	struct adapter *adapter;
777	if_softc_ctx_t scctx;
778	device_t dev;
779	struct e1000_hw *hw;
780	int error = 0;
781
782	INIT_DEBUGOUT("em_if_attach_pre: begin");
783	dev = iflib_get_dev(ctx);
784	adapter = iflib_get_softc(ctx);
785
786	adapter->ctx = adapter->osdep.ctx = ctx;
787	adapter->dev = adapter->osdep.dev = dev;
788	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
789	adapter->media = iflib_get_media(ctx);
790	hw = &adapter->hw;
791
792	adapter->tx_process_limit = scctx->isc_ntxd[0];
793
794	/* SYSCTL stuff */
795	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
796	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
797	    OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
798	    adapter, 0, em_sysctl_nvm_info, "I", "NVM Information");
799
800	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
801	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
802	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
803	    adapter, 0, em_sysctl_debug_info, "I", "Debug Information");
804
805	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
806	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
807	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
808	    adapter, 0, em_set_flowcntl, "I", "Flow Control");
809
810	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
811	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
812	    OID_AUTO, "reg_dump",
813	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
814	    em_get_regs, "A", "Dump Registers");
815
816	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
817	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
818	    OID_AUTO, "rs_dump",
819	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
820	    em_get_rs, "I", "Dump RS indexes");
821
822	/* Determine hardware and mac info */
823	em_identify_hardware(ctx);
824
825	scctx->isc_tx_nsegments = EM_MAX_SCATTER;
826	scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = em_set_num_queues(ctx);
827	if (bootverbose)
828		device_printf(dev, "attach_pre capping queues at %d\n",
829		    scctx->isc_ntxqsets_max);
830
831	if (adapter->hw.mac.type >= igb_mac_min) {
832		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union e1000_adv_tx_desc), EM_DBA_ALIGN);
833		scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_adv_rx_desc), EM_DBA_ALIGN);
834		scctx->isc_txd_size[0] = sizeof(union e1000_adv_tx_desc);
835		scctx->isc_rxd_size[0] = sizeof(union e1000_adv_rx_desc);
836		scctx->isc_txrx = &igb_txrx;
837		scctx->isc_tx_tso_segments_max = EM_MAX_SCATTER;
838		scctx->isc_tx_tso_size_max = EM_TSO_SIZE;
839		scctx->isc_tx_tso_segsize_max = EM_TSO_SEG_SIZE;
840		scctx->isc_capabilities = scctx->isc_capenable = IGB_CAPS;
841		scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |
842		     CSUM_IP6_TCP | CSUM_IP6_UDP;
843		if (adapter->hw.mac.type != e1000_82575)
844			scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
845		/*
846		** Some new devices, as with ixgbe, now may
847		** use a different BAR, so we need to keep
848		** track of which is used.
849		*/
850		scctx->isc_msix_bar = PCIR_BAR(EM_MSIX_BAR);
851		if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0)
852			scctx->isc_msix_bar += 4;
853	} else if (adapter->hw.mac.type >= em_mac_min) {
854		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]* sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
855		scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
856		scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
857		scctx->isc_rxd_size[0] = sizeof(union e1000_rx_desc_extended);
858		scctx->isc_txrx = &em_txrx;
859		scctx->isc_tx_tso_segments_max = EM_MAX_SCATTER;
860		scctx->isc_tx_tso_size_max = EM_TSO_SIZE;
861		scctx->isc_tx_tso_segsize_max = EM_TSO_SEG_SIZE;
862		scctx->isc_capabilities = scctx->isc_capenable = EM_CAPS;
863		/*
864		 * For EM-class devices, don't enable IFCAP_{TSO4,VLAN_HWTSO}
865		 * by default as we don't have workarounds for all associated
866		 * silicon errata.  E. g., with several MACs such as 82573E,
867		 * TSO only works at Gigabit speed and otherwise can cause the
868		 * hardware to hang (which also would be next to impossible to
869		 * work around given that already queued TSO-using descriptors
870		 * would need to be flushed and vlan(4) reconfigured at runtime
871		 * in case of a link speed change).  Moreover, MACs like 82579
872		 * still can hang at Gigabit even with all publicly documented
873		 * TSO workarounds implemented.  Generally, the penality of
874		 * these workarounds is rather high and may involve copying
875		 * mbuf data around so advantages of TSO lapse.  Still, TSO may
876		 * work for a few MACs of this class - at least when sticking
877		 * with Gigabit - in which case users may enable TSO manually.
878		 */
879		scctx->isc_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
880		scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
881		/*
882		 * We support MSI-X with 82574 only, but indicate to iflib(4)
883		 * that it shall give MSI at least a try with other devices.
884		 */
885		if (adapter->hw.mac.type == e1000_82574) {
886			scctx->isc_msix_bar = PCIR_BAR(EM_MSIX_BAR);
887		} else {
888			scctx->isc_msix_bar = -1;
889			scctx->isc_disable_msix = 1;
890		}
891	} else {
892		scctx->isc_txqsizes[0] = roundup2((scctx->isc_ntxd[0] + 1) * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
893		scctx->isc_rxqsizes[0] = roundup2((scctx->isc_nrxd[0] + 1) * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
894		scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
895		scctx->isc_rxd_size[0] = sizeof(struct e1000_rx_desc);
896		scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP;
897		scctx->isc_txrx = &lem_txrx;
898		scctx->isc_capabilities = scctx->isc_capenable = LEM_CAPS;
899		if (adapter->hw.mac.type < e1000_82543)
900			scctx->isc_capenable &= ~(IFCAP_HWCSUM|IFCAP_VLAN_HWCSUM);
901		/* INTx only */
902		scctx->isc_msix_bar = 0;
903	}
904
905	/* Setup PCI resources */
906	if (em_allocate_pci_resources(ctx)) {
907		device_printf(dev, "Allocation of PCI resources failed\n");
908		error = ENXIO;
909		goto err_pci;
910	}
911
912	/*
913	** For ICH8 and family we need to
914	** map the flash memory, and this
915	** must happen after the MAC is
916	** identified
917	*/
918	if ((hw->mac.type == e1000_ich8lan) ||
919	    (hw->mac.type == e1000_ich9lan) ||
920	    (hw->mac.type == e1000_ich10lan) ||
921	    (hw->mac.type == e1000_pchlan) ||
922	    (hw->mac.type == e1000_pch2lan) ||
923	    (hw->mac.type == e1000_pch_lpt)) {
924		int rid = EM_BAR_TYPE_FLASH;
925		adapter->flash = bus_alloc_resource_any(dev,
926		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
927		if (adapter->flash == NULL) {
928			device_printf(dev, "Mapping of Flash failed\n");
929			error = ENXIO;
930			goto err_pci;
931		}
932		/* This is used in the shared code */
933		hw->flash_address = (u8 *)adapter->flash;
934		adapter->osdep.flash_bus_space_tag =
935		    rman_get_bustag(adapter->flash);
936		adapter->osdep.flash_bus_space_handle =
937		    rman_get_bushandle(adapter->flash);
938	}
939	/*
940	** In the new SPT device flash is not  a
941	** separate BAR, rather it is also in BAR0,
942	** so use the same tag and an offset handle for the
943	** FLASH read/write macros in the shared code.
944	*/
945	else if (hw->mac.type >= e1000_pch_spt) {
946		adapter->osdep.flash_bus_space_tag =
947		    adapter->osdep.mem_bus_space_tag;
948		adapter->osdep.flash_bus_space_handle =
949		    adapter->osdep.mem_bus_space_handle
950		    + E1000_FLASH_BASE_ADDR;
951	}
952
953	/* Do Shared Code initialization */
954	error = e1000_setup_init_funcs(hw, TRUE);
955	if (error) {
956		device_printf(dev, "Setup of Shared code failed, error %d\n",
957		    error);
958		error = ENXIO;
959		goto err_pci;
960	}
961
962	em_setup_msix(ctx);
963	e1000_get_bus_info(hw);
964
965	/* Set up some sysctls for the tunable interrupt delays */
966	em_add_int_delay_sysctl(adapter, "rx_int_delay",
967	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
968	    E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
969	em_add_int_delay_sysctl(adapter, "tx_int_delay",
970	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
971	    E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt);
972	em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
973	    "receive interrupt delay limit in usecs",
974	    &adapter->rx_abs_int_delay,
975	    E1000_REGISTER(hw, E1000_RADV),
976	    em_rx_abs_int_delay_dflt);
977	em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
978	    "transmit interrupt delay limit in usecs",
979	    &adapter->tx_abs_int_delay,
980	    E1000_REGISTER(hw, E1000_TADV),
981	    em_tx_abs_int_delay_dflt);
982	em_add_int_delay_sysctl(adapter, "itr",
983	    "interrupt delay limit in usecs/4",
984	    &adapter->tx_itr,
985	    E1000_REGISTER(hw, E1000_ITR),
986	    DEFAULT_ITR);
987
988	hw->mac.autoneg = DO_AUTO_NEG;
989	hw->phy.autoneg_wait_to_complete = FALSE;
990	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
991
992	if (adapter->hw.mac.type < em_mac_min) {
993		e1000_init_script_state_82541(&adapter->hw, TRUE);
994		e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
995	}
996	/* Copper options */
997	if (hw->phy.media_type == e1000_media_type_copper) {
998		hw->phy.mdix = AUTO_ALL_MODES;
999		hw->phy.disable_polarity_correction = FALSE;
1000		hw->phy.ms_type = EM_MASTER_SLAVE;
1001	}
1002
1003	/*
1004	 * Set the frame limits assuming
1005	 * standard ethernet sized frames.
1006	 */
1007	scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
1008	    ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
1009
1010	/*
1011	 * This controls when hardware reports transmit completion
1012	 * status.
1013	 */
1014	hw->mac.report_tx_early = 1;
1015
1016	/* Allocate multicast array memory. */
1017	adapter->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
1018	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
1019	if (adapter->mta == NULL) {
1020		device_printf(dev, "Can not allocate multicast setup array\n");
1021		error = ENOMEM;
1022		goto err_late;
1023	}
1024
1025	/* Check SOL/IDER usage */
1026	if (e1000_check_reset_block(hw))
1027		device_printf(dev, "PHY reset is blocked"
1028			      " due to SOL/IDER session.\n");
1029
1030	/* Sysctl for setting Energy Efficient Ethernet */
1031	hw->dev_spec.ich8lan.eee_disable = eee_setting;
1032	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1033	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1034	    OID_AUTO, "eee_control",
1035	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1036	    adapter, 0, em_sysctl_eee, "I",
1037	    "Disable Energy Efficient Ethernet");
1038
1039	/*
1040	** Start from a known state, this is
1041	** important in reading the nvm and
1042	** mac from that.
1043	*/
1044	e1000_reset_hw(hw);
1045
1046	/* Make sure we have a good EEPROM before we read from it */
1047	if (e1000_validate_nvm_checksum(hw) < 0) {
1048		/*
1049		** Some PCI-E parts fail the first check due to
1050		** the link being in sleep state, call it again,
1051		** if it fails a second time its a real issue.
1052		*/
1053		if (e1000_validate_nvm_checksum(hw) < 0) {
1054			device_printf(dev,
1055			    "The EEPROM Checksum Is Not Valid\n");
1056			error = EIO;
1057			goto err_late;
1058		}
1059	}
1060
1061	/* Copy the permanent MAC address out of the EEPROM */
1062	if (e1000_read_mac_addr(hw) < 0) {
1063		device_printf(dev, "EEPROM read error while reading MAC"
1064			      " address\n");
1065		error = EIO;
1066		goto err_late;
1067	}
1068
1069	if (!em_is_valid_ether_addr(hw->mac.addr)) {
1070		device_printf(dev, "Invalid MAC address\n");
1071		error = EIO;
1072		goto err_late;
1073	}
1074
1075	/* Disable ULP support */
1076	e1000_disable_ulp_lpt_lp(hw, TRUE);
1077
1078	/*
1079	 * Get Wake-on-Lan and Management info for later use
1080	 */
1081	em_get_wakeup(ctx);
1082
1083	/* Enable only WOL MAGIC by default */
1084	scctx->isc_capenable &= ~IFCAP_WOL;
1085	if (adapter->wol != 0)
1086		scctx->isc_capenable |= IFCAP_WOL_MAGIC;
1087
1088	iflib_set_mac(ctx, hw->mac.addr);
1089
1090	return (0);
1091
1092err_late:
1093	em_release_hw_control(adapter);
1094err_pci:
1095	em_free_pci_resources(ctx);
1096	free(adapter->mta, M_DEVBUF);
1097
1098	return (error);
1099}
1100
1101static int
1102em_if_attach_post(if_ctx_t ctx)
1103{
1104	struct adapter *adapter = iflib_get_softc(ctx);
1105	struct e1000_hw *hw = &adapter->hw;
1106	int error = 0;
1107
1108	/* Setup OS specific network interface */
1109	error = em_setup_interface(ctx);
1110	if (error != 0) {
1111		goto err_late;
1112	}
1113
1114	em_reset(ctx);
1115
1116	/* Initialize statistics */
1117	em_update_stats_counters(adapter);
1118	hw->mac.get_link_status = 1;
1119	em_if_update_admin_status(ctx);
1120	em_add_hw_stats(adapter);
1121
1122	/* Non-AMT based hardware can now take control from firmware */
1123	if (adapter->has_manage && !adapter->has_amt)
1124		em_get_hw_control(adapter);
1125
1126	INIT_DEBUGOUT("em_if_attach_post: end");
1127
1128	return (error);
1129
1130err_late:
1131	em_release_hw_control(adapter);
1132	em_free_pci_resources(ctx);
1133	em_if_queues_free(ctx);
1134	free(adapter->mta, M_DEVBUF);
1135
1136	return (error);
1137}
1138
1139/*********************************************************************
1140 *  Device removal routine
1141 *
1142 *  The detach entry point is called when the driver is being removed.
1143 *  This routine stops the adapter and deallocates all the resources
1144 *  that were allocated for driver operation.
1145 *
1146 *  return 0 on success, positive on failure
1147 *********************************************************************/
1148static int
1149em_if_detach(if_ctx_t ctx)
1150{
1151	struct adapter	*adapter = iflib_get_softc(ctx);
1152
1153	INIT_DEBUGOUT("em_if_detach: begin");
1154
1155	e1000_phy_hw_reset(&adapter->hw);
1156
1157	em_release_manageability(adapter);
1158	em_release_hw_control(adapter);
1159	em_free_pci_resources(ctx);
1160
1161	return (0);
1162}
1163
1164/*********************************************************************
1165 *
1166 *  Shutdown entry point
1167 *
1168 **********************************************************************/
1169
1170static int
1171em_if_shutdown(if_ctx_t ctx)
1172{
1173	return em_if_suspend(ctx);
1174}
1175
1176/*
1177 * Suspend/resume device methods.
1178 */
1179static int
1180em_if_suspend(if_ctx_t ctx)
1181{
1182	struct adapter *adapter = iflib_get_softc(ctx);
1183
1184	em_release_manageability(adapter);
1185	em_release_hw_control(adapter);
1186	em_enable_wakeup(ctx);
1187	return (0);
1188}
1189
1190static int
1191em_if_resume(if_ctx_t ctx)
1192{
1193	struct adapter *adapter = iflib_get_softc(ctx);
1194
1195	if (adapter->hw.mac.type == e1000_pch2lan)
1196		e1000_resume_workarounds_pchlan(&adapter->hw);
1197	em_if_init(ctx);
1198	em_init_manageability(adapter);
1199
1200	return(0);
1201}
1202
1203static int
1204em_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1205{
1206	int max_frame_size;
1207	struct adapter *adapter = iflib_get_softc(ctx);
1208	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
1209
1210	 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1211
1212	switch (adapter->hw.mac.type) {
1213	case e1000_82571:
1214	case e1000_82572:
1215	case e1000_ich9lan:
1216	case e1000_ich10lan:
1217	case e1000_pch2lan:
1218	case e1000_pch_lpt:
1219	case e1000_pch_spt:
1220	case e1000_pch_cnp:
1221	case e1000_82574:
1222	case e1000_82583:
1223	case e1000_80003es2lan:
1224		/* 9K Jumbo Frame size */
1225		max_frame_size = 9234;
1226		break;
1227	case e1000_pchlan:
1228		max_frame_size = 4096;
1229		break;
1230	case e1000_82542:
1231	case e1000_ich8lan:
1232		/* Adapters that do not support jumbo frames */
1233		max_frame_size = ETHER_MAX_LEN;
1234		break;
1235	default:
1236		if (adapter->hw.mac.type >= igb_mac_min)
1237			max_frame_size = 9234;
1238		else /* lem */
1239			max_frame_size = MAX_JUMBO_FRAME_SIZE;
1240	}
1241	if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
1242		return (EINVAL);
1243	}
1244
1245	scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
1246	    mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1247	return (0);
1248}
1249
1250/*********************************************************************
1251 *  Init entry point
1252 *
1253 *  This routine is used in two ways. It is used by the stack as
1254 *  init entry point in network interface structure. It is also used
1255 *  by the driver as a hw/sw initialization routine to get to a
1256 *  consistent state.
1257 *
1258 **********************************************************************/
1259static void
1260em_if_init(if_ctx_t ctx)
1261{
1262	struct adapter *adapter = iflib_get_softc(ctx);
1263	if_softc_ctx_t scctx = adapter->shared;
1264	struct ifnet *ifp = iflib_get_ifp(ctx);
1265	struct em_tx_queue *tx_que;
1266	int i;
1267
1268	INIT_DEBUGOUT("em_if_init: begin");
1269
1270	/* Get the latest mac address, User can use a LAA */
1271	bcopy(if_getlladdr(ifp), adapter->hw.mac.addr,
1272	    ETHER_ADDR_LEN);
1273
1274	/* Put the address into the Receive Address Array */
1275	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1276
1277	/*
1278	 * With the 82571 adapter, RAR[0] may be overwritten
1279	 * when the other port is reset, we make a duplicate
1280	 * in RAR[14] for that eventuality, this assures
1281	 * the interface continues to function.
1282	 */
1283	if (adapter->hw.mac.type == e1000_82571) {
1284		e1000_set_laa_state_82571(&adapter->hw, TRUE);
1285		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1286		    E1000_RAR_ENTRIES - 1);
1287	}
1288
1289
1290	/* Initialize the hardware */
1291	em_reset(ctx);
1292	em_if_update_admin_status(ctx);
1293
1294	for (i = 0, tx_que = adapter->tx_queues; i < adapter->tx_num_queues; i++, tx_que++) {
1295		struct tx_ring *txr = &tx_que->txr;
1296
1297		txr->tx_rs_cidx = txr->tx_rs_pidx;
1298
1299		/* Initialize the last processed descriptor to be the end of
1300		 * the ring, rather than the start, so that we avoid an
1301		 * off-by-one error when calculating how many descriptors are
1302		 * done in the credits_update function.
1303		 */
1304		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1305	}
1306
1307	/* Setup VLAN support, basic and offload if available */
1308	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1309
1310	/* Clear bad data from Rx FIFOs */
1311	if (adapter->hw.mac.type >= igb_mac_min)
1312		e1000_rx_fifo_flush_82575(&adapter->hw);
1313
1314	/* Configure for OS presence */
1315	em_init_manageability(adapter);
1316
1317	/* Prepare transmit descriptors and buffers */
1318	em_initialize_transmit_unit(ctx);
1319
1320	/* Setup Multicast table */
1321	em_if_multi_set(ctx);
1322
1323	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
1324	em_initialize_receive_unit(ctx);
1325
1326	/* Use real VLAN Filter support? */
1327	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1328		if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
1329			/* Use real VLAN Filter support */
1330			em_setup_vlan_hw_support(adapter);
1331		else {
1332			u32 ctrl;
1333			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1334			ctrl |= E1000_CTRL_VME;
1335			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1336		}
1337	} else {
1338		u32 ctrl;
1339		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1340		ctrl &= ~E1000_CTRL_VME;
1341		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1342	}
1343
1344	/* Don't lose promiscuous settings */
1345	em_if_set_promisc(ctx, if_getflags(ifp));
1346	e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1347
1348	/* MSI-X configuration for 82574 */
1349	if (adapter->hw.mac.type == e1000_82574) {
1350		int tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1351
1352		tmp |= E1000_CTRL_EXT_PBA_CLR;
1353		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1354		/* Set the IVAR - interrupt vector routing. */
1355		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars);
1356	} else if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
1357		igb_configure_queues(adapter);
1358
1359	/* this clears any pending interrupts */
1360	E1000_READ_REG(&adapter->hw, E1000_ICR);
1361	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
1362
1363	/* AMT based hardware can now take control from firmware */
1364	if (adapter->has_manage && adapter->has_amt)
1365		em_get_hw_control(adapter);
1366
1367	/* Set Energy Efficient Ethernet */
1368	if (adapter->hw.mac.type >= igb_mac_min &&
1369	    adapter->hw.phy.media_type == e1000_media_type_copper) {
1370		if (adapter->hw.mac.type == e1000_i354)
1371			e1000_set_eee_i354(&adapter->hw, TRUE, TRUE);
1372		else
1373			e1000_set_eee_i350(&adapter->hw, TRUE, TRUE);
1374	}
1375}
1376
1377/*********************************************************************
1378 *
1379 *  Fast Legacy/MSI Combined Interrupt Service routine
1380 *
1381 *********************************************************************/
1382int
1383em_intr(void *arg)
1384{
1385	struct adapter *adapter = arg;
1386	if_ctx_t ctx = adapter->ctx;
1387	u32 reg_icr;
1388
1389	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1390
1391	/* Hot eject? */
1392	if (reg_icr == 0xffffffff)
1393		return FILTER_STRAY;
1394
1395	/* Definitely not our interrupt. */
1396	if (reg_icr == 0x0)
1397		return FILTER_STRAY;
1398
1399	/*
1400	 * Starting with the 82571 chip, bit 31 should be used to
1401	 * determine whether the interrupt belongs to us.
1402	 */
1403	if (adapter->hw.mac.type >= e1000_82571 &&
1404	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1405		return FILTER_STRAY;
1406
1407	/*
1408	 * Only MSI-X interrupts have one-shot behavior by taking advantage
1409	 * of the EIAC register.  Thus, explicitly disable interrupts.  This
1410	 * also works around the MSI message reordering errata on certain
1411	 * systems.
1412	 */
1413	IFDI_INTR_DISABLE(ctx);
1414
1415	/* Link status change */
1416	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
1417		em_handle_link(ctx);
1418
1419	if (reg_icr & E1000_ICR_RXO)
1420		adapter->rx_overruns++;
1421
1422	return (FILTER_SCHEDULE_THREAD);
1423}
1424
1425static int
1426em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1427{
1428	struct adapter *adapter = iflib_get_softc(ctx);
1429	struct em_rx_queue *rxq = &adapter->rx_queues[rxqid];
1430
1431	E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxq->eims);
1432	return (0);
1433}
1434
1435static int
1436em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1437{
1438	struct adapter *adapter = iflib_get_softc(ctx);
1439	struct em_tx_queue *txq = &adapter->tx_queues[txqid];
1440
1441	E1000_WRITE_REG(&adapter->hw, E1000_IMS, txq->eims);
1442	return (0);
1443}
1444
1445static int
1446igb_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1447{
1448	struct adapter *adapter = iflib_get_softc(ctx);
1449	struct em_rx_queue *rxq = &adapter->rx_queues[rxqid];
1450
1451	E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxq->eims);
1452	return (0);
1453}
1454
1455static int
1456igb_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1457{
1458	struct adapter *adapter = iflib_get_softc(ctx);
1459	struct em_tx_queue *txq = &adapter->tx_queues[txqid];
1460
1461	E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txq->eims);
1462	return (0);
1463}
1464
1465/*********************************************************************
1466 *
1467 *  MSI-X RX Interrupt Service routine
1468 *
1469 **********************************************************************/
1470static int
1471em_msix_que(void *arg)
1472{
1473	struct em_rx_queue *que = arg;
1474
1475	++que->irqs;
1476
1477	return (FILTER_SCHEDULE_THREAD);
1478}
1479
1480/*********************************************************************
1481 *
1482 *  MSI-X Link Fast Interrupt Service routine
1483 *
1484 **********************************************************************/
1485static int
1486em_msix_link(void *arg)
1487{
1488	struct adapter *adapter = arg;
1489	u32 reg_icr;
1490
1491	++adapter->link_irq;
1492	MPASS(adapter->hw.back != NULL);
1493	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1494
1495	if (reg_icr & E1000_ICR_RXO)
1496		adapter->rx_overruns++;
1497
1498	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1499		em_handle_link(adapter->ctx);
1500	} else if (adapter->hw.mac.type == e1000_82574) {
1501		/* Only re-arm 82574 if em_if_update_admin_status() won't. */
1502		E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK |
1503		    E1000_IMS_LSC);
1504	}
1505
1506	if (adapter->hw.mac.type == e1000_82574) {
1507		/*
1508		 * Because we must read the ICR for this interrupt it may
1509		 * clear other causes using autoclear, for this reason we
1510		 * simply create a soft interrupt for all these vectors.
1511		 */
1512		if (reg_icr)
1513			E1000_WRITE_REG(&adapter->hw, E1000_ICS, adapter->ims);
1514	} else {
1515		/* Re-arm unconditionally */
1516		E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC);
1517		E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
1518	}
1519
1520	return (FILTER_HANDLED);
1521}
1522
1523static void
1524em_handle_link(void *context)
1525{
1526	if_ctx_t ctx = context;
1527	struct adapter *adapter = iflib_get_softc(ctx);
1528
1529	adapter->hw.mac.get_link_status = 1;
1530	iflib_admin_intr_deferred(ctx);
1531}
1532
1533/*********************************************************************
1534 *
1535 *  Media Ioctl callback
1536 *
1537 *  This routine is called whenever the user queries the status of
1538 *  the interface using ifconfig.
1539 *
1540 **********************************************************************/
1541static void
1542em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1543{
1544	struct adapter *adapter = iflib_get_softc(ctx);
1545	u_char fiber_type = IFM_1000_SX;
1546
1547	INIT_DEBUGOUT("em_if_media_status: begin");
1548
1549	iflib_admin_intr_deferred(ctx);
1550
1551	ifmr->ifm_status = IFM_AVALID;
1552	ifmr->ifm_active = IFM_ETHER;
1553
1554	if (!adapter->link_active) {
1555		return;
1556	}
1557
1558	ifmr->ifm_status |= IFM_ACTIVE;
1559
1560	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1561	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1562		if (adapter->hw.mac.type == e1000_82545)
1563			fiber_type = IFM_1000_LX;
1564		ifmr->ifm_active |= fiber_type | IFM_FDX;
1565	} else {
1566		switch (adapter->link_speed) {
1567		case 10:
1568			ifmr->ifm_active |= IFM_10_T;
1569			break;
1570		case 100:
1571			ifmr->ifm_active |= IFM_100_TX;
1572			break;
1573		case 1000:
1574			ifmr->ifm_active |= IFM_1000_T;
1575			break;
1576		}
1577		if (adapter->link_duplex == FULL_DUPLEX)
1578			ifmr->ifm_active |= IFM_FDX;
1579		else
1580			ifmr->ifm_active |= IFM_HDX;
1581	}
1582}
1583
1584/*********************************************************************
1585 *
1586 *  Media Ioctl callback
1587 *
1588 *  This routine is called when the user changes speed/duplex using
1589 *  media/mediopt option with ifconfig.
1590 *
1591 **********************************************************************/
1592static int
1593em_if_media_change(if_ctx_t ctx)
1594{
1595	struct adapter *adapter = iflib_get_softc(ctx);
1596	struct ifmedia *ifm = iflib_get_media(ctx);
1597
1598	INIT_DEBUGOUT("em_if_media_change: begin");
1599
1600	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1601		return (EINVAL);
1602
1603	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1604	case IFM_AUTO:
1605		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1606		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1607		break;
1608	case IFM_1000_LX:
1609	case IFM_1000_SX:
1610	case IFM_1000_T:
1611		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1612		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1613		break;
1614	case IFM_100_TX:
1615		adapter->hw.mac.autoneg = FALSE;
1616		adapter->hw.phy.autoneg_advertised = 0;
1617		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1618			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1619		else
1620			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1621		break;
1622	case IFM_10_T:
1623		adapter->hw.mac.autoneg = FALSE;
1624		adapter->hw.phy.autoneg_advertised = 0;
1625		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1626			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1627		else
1628			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1629		break;
1630	default:
1631		device_printf(adapter->dev, "Unsupported media type\n");
1632	}
1633
1634	em_if_init(ctx);
1635
1636	return (0);
1637}
1638
1639static int
1640em_if_set_promisc(if_ctx_t ctx, int flags)
1641{
1642	struct adapter *adapter = iflib_get_softc(ctx);
1643	u32 reg_rctl;
1644
1645	em_disable_promisc(ctx);
1646
1647	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1648
1649	if (flags & IFF_PROMISC) {
1650		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1651		/* Turn this on if you want to see bad packets */
1652		if (em_debug_sbp)
1653			reg_rctl |= E1000_RCTL_SBP;
1654		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1655	} else if (flags & IFF_ALLMULTI) {
1656		reg_rctl |= E1000_RCTL_MPE;
1657		reg_rctl &= ~E1000_RCTL_UPE;
1658		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1659	}
1660	return (0);
1661}
1662
1663static void
1664em_disable_promisc(if_ctx_t ctx)
1665{
1666	struct adapter *adapter = iflib_get_softc(ctx);
1667	struct ifnet *ifp = iflib_get_ifp(ctx);
1668	u32 reg_rctl;
1669	int mcnt = 0;
1670
1671	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1672	reg_rctl &= (~E1000_RCTL_UPE);
1673	if (if_getflags(ifp) & IFF_ALLMULTI)
1674		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1675	else
1676		mcnt = if_llmaddr_count(ifp);
1677	/* Don't disable if in MAX groups */
1678	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1679		reg_rctl &=  (~E1000_RCTL_MPE);
1680	reg_rctl &=  (~E1000_RCTL_SBP);
1681	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1682}
1683
1684
1685static u_int
1686em_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1687{
1688	u8 *mta = arg;
1689
1690	if (cnt == MAX_NUM_MULTICAST_ADDRESSES)
1691		return (1);
1692
1693	bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1694
1695	return (1);
1696}
1697
1698/*********************************************************************
1699 *  Multicast Update
1700 *
1701 *  This routine is called whenever multicast address list is updated.
1702 *
1703 **********************************************************************/
1704
1705static void
1706em_if_multi_set(if_ctx_t ctx)
1707{
1708	struct adapter *adapter = iflib_get_softc(ctx);
1709	struct ifnet *ifp = iflib_get_ifp(ctx);
1710	u32 reg_rctl = 0;
1711	u8  *mta; /* Multicast array memory */
1712	int mcnt = 0;
1713
1714	IOCTL_DEBUGOUT("em_set_multi: begin");
1715
1716	mta = adapter->mta;
1717	bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
1718
1719	if (adapter->hw.mac.type == e1000_82542 &&
1720	    adapter->hw.revision_id == E1000_REVISION_2) {
1721		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1722		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1723			e1000_pci_clear_mwi(&adapter->hw);
1724		reg_rctl |= E1000_RCTL_RST;
1725		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1726		msec_delay(5);
1727	}
1728
1729	mcnt = if_foreach_llmaddr(ifp, em_copy_maddr, mta);
1730
1731	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1732		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1733		reg_rctl |= E1000_RCTL_MPE;
1734		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1735	} else
1736		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
1737
1738	if (adapter->hw.mac.type == e1000_82542 &&
1739	    adapter->hw.revision_id == E1000_REVISION_2) {
1740		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1741		reg_rctl &= ~E1000_RCTL_RST;
1742		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1743		msec_delay(5);
1744		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1745			e1000_pci_set_mwi(&adapter->hw);
1746	}
1747}
1748
1749/*********************************************************************
1750 *  Timer routine
1751 *
1752 *  This routine schedules em_if_update_admin_status() to check for
1753 *  link status and to gather statistics as well as to perform some
1754 *  controller-specific hardware patting.
1755 *
1756 **********************************************************************/
1757static void
1758em_if_timer(if_ctx_t ctx, uint16_t qid)
1759{
1760
1761	if (qid != 0)
1762		return;
1763
1764	iflib_admin_intr_deferred(ctx);
1765}
1766
1767static void
1768em_if_update_admin_status(if_ctx_t ctx)
1769{
1770	struct adapter *adapter = iflib_get_softc(ctx);
1771	struct e1000_hw *hw = &adapter->hw;
1772	device_t dev = iflib_get_dev(ctx);
1773	u32 link_check, thstat, ctrl;
1774
1775	link_check = thstat = ctrl = 0;
1776	/* Get the cached link value or read phy for real */
1777	switch (hw->phy.media_type) {
1778	case e1000_media_type_copper:
1779		if (hw->mac.get_link_status) {
1780			if (hw->mac.type == e1000_pch_spt)
1781				msec_delay(50);
1782			/* Do the work to read phy */
1783			e1000_check_for_link(hw);
1784			link_check = !hw->mac.get_link_status;
1785			if (link_check) /* ESB2 fix */
1786				e1000_cfg_on_link_up(hw);
1787		} else {
1788			link_check = TRUE;
1789		}
1790		break;
1791	case e1000_media_type_fiber:
1792		e1000_check_for_link(hw);
1793		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1794			    E1000_STATUS_LU);
1795		break;
1796	case e1000_media_type_internal_serdes:
1797		e1000_check_for_link(hw);
1798		link_check = adapter->hw.mac.serdes_has_link;
1799		break;
1800	/* VF device is type_unknown */
1801	case e1000_media_type_unknown:
1802		e1000_check_for_link(hw);
1803		link_check = !hw->mac.get_link_status;
1804		/* FALLTHROUGH */
1805	default:
1806		break;
1807	}
1808
1809	/* Check for thermal downshift or shutdown */
1810	if (hw->mac.type == e1000_i350) {
1811		thstat = E1000_READ_REG(hw, E1000_THSTAT);
1812		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
1813	}
1814
1815	/* Now check for a transition */
1816	if (link_check && (adapter->link_active == 0)) {
1817		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
1818		    &adapter->link_duplex);
1819		/* Check if we must disable SPEED_MODE bit on PCI-E */
1820		if ((adapter->link_speed != SPEED_1000) &&
1821		    ((hw->mac.type == e1000_82571) ||
1822		    (hw->mac.type == e1000_82572))) {
1823			int tarc0;
1824			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
1825			tarc0 &= ~TARC_SPEED_MODE_BIT;
1826			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
1827		}
1828		if (bootverbose)
1829			device_printf(dev, "Link is up %d Mbps %s\n",
1830			    adapter->link_speed,
1831			    ((adapter->link_duplex == FULL_DUPLEX) ?
1832			    "Full Duplex" : "Half Duplex"));
1833		adapter->link_active = 1;
1834		adapter->smartspeed = 0;
1835		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_MASK) ==
1836		    E1000_CTRL_EXT_LINK_MODE_GMII &&
1837		    (thstat & E1000_THSTAT_LINK_THROTTLE))
1838			device_printf(dev, "Link: thermal downshift\n");
1839		/* Delay Link Up for Phy update */
1840		if (((hw->mac.type == e1000_i210) ||
1841		    (hw->mac.type == e1000_i211)) &&
1842		    (hw->phy.id == I210_I_PHY_ID))
1843			msec_delay(I210_LINK_DELAY);
1844		/* Reset if the media type changed. */
1845		if ((hw->dev_spec._82575.media_changed) &&
1846			(adapter->hw.mac.type >= igb_mac_min)) {
1847			hw->dev_spec._82575.media_changed = false;
1848			adapter->flags |= IGB_MEDIA_RESET;
1849			em_reset(ctx);
1850		}
1851		iflib_link_state_change(ctx, LINK_STATE_UP,
1852		    IF_Mbps(adapter->link_speed));
1853	} else if (!link_check && (adapter->link_active == 1)) {
1854		adapter->link_speed = 0;
1855		adapter->link_duplex = 0;
1856		adapter->link_active = 0;
1857		iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
1858	}
1859	em_update_stats_counters(adapter);
1860
1861	/* Reset LAA into RAR[0] on 82571 */
1862	if (hw->mac.type == e1000_82571 && e1000_get_laa_state_82571(hw))
1863		e1000_rar_set(hw, hw->mac.addr, 0);
1864
1865	if (hw->mac.type < em_mac_min)
1866		lem_smartspeed(adapter);
1867	else if (hw->mac.type == e1000_82574 &&
1868	    adapter->intr_type == IFLIB_INTR_MSIX)
1869		E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK |
1870		    E1000_IMS_LSC);
1871}
1872
1873static void
1874em_if_watchdog_reset(if_ctx_t ctx)
1875{
1876	struct adapter *adapter = iflib_get_softc(ctx);
1877
1878	/*
1879	 * Just count the event; iflib(4) will already trigger a
1880	 * sufficient reset of the controller.
1881	 */
1882	adapter->watchdog_events++;
1883}
1884
1885/*********************************************************************
1886 *
1887 *  This routine disables all traffic on the adapter by issuing a
1888 *  global reset on the MAC.
1889 *
1890 **********************************************************************/
1891static void
1892em_if_stop(if_ctx_t ctx)
1893{
1894	struct adapter *adapter = iflib_get_softc(ctx);
1895
1896	INIT_DEBUGOUT("em_if_stop: begin");
1897
1898	e1000_reset_hw(&adapter->hw);
1899	if (adapter->hw.mac.type >= e1000_82544)
1900		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, 0);
1901
1902	e1000_led_off(&adapter->hw);
1903	e1000_cleanup_led(&adapter->hw);
1904}
1905
1906/*********************************************************************
1907 *
1908 *  Determine hardware revision.
1909 *
1910 **********************************************************************/
1911static void
1912em_identify_hardware(if_ctx_t ctx)
1913{
1914	device_t dev = iflib_get_dev(ctx);
1915	struct adapter *adapter = iflib_get_softc(ctx);
1916
1917	/* Make sure our PCI config space has the necessary stuff set */
1918	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1919
1920	/* Save off the information about this board */
1921	adapter->hw.vendor_id = pci_get_vendor(dev);
1922	adapter->hw.device_id = pci_get_device(dev);
1923	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1924	adapter->hw.subsystem_vendor_id =
1925	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
1926	adapter->hw.subsystem_device_id =
1927	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
1928
1929	/* Do Shared Code Init and Setup */
1930	if (e1000_set_mac_type(&adapter->hw)) {
1931		device_printf(dev, "Setup init failure\n");
1932		return;
1933	}
1934}
1935
1936static int
1937em_allocate_pci_resources(if_ctx_t ctx)
1938{
1939	struct adapter *adapter = iflib_get_softc(ctx);
1940	device_t dev = iflib_get_dev(ctx);
1941	int rid, val;
1942
1943	rid = PCIR_BAR(0);
1944	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1945	    &rid, RF_ACTIVE);
1946	if (adapter->memory == NULL) {
1947		device_printf(dev, "Unable to allocate bus resource: memory\n");
1948		return (ENXIO);
1949	}
1950	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory);
1951	adapter->osdep.mem_bus_space_handle =
1952	    rman_get_bushandle(adapter->memory);
1953	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1954
1955	/* Only older adapters use IO mapping */
1956	if (adapter->hw.mac.type < em_mac_min &&
1957	    adapter->hw.mac.type > e1000_82543) {
1958		/* Figure our where our IO BAR is ? */
1959		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1960			val = pci_read_config(dev, rid, 4);
1961			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
1962				break;
1963			}
1964			rid += 4;
1965			/* check for 64bit BAR */
1966			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
1967				rid += 4;
1968		}
1969		if (rid >= PCIR_CIS) {
1970			device_printf(dev, "Unable to locate IO BAR\n");
1971			return (ENXIO);
1972		}
1973		adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
1974		    &rid, RF_ACTIVE);
1975		if (adapter->ioport == NULL) {
1976			device_printf(dev, "Unable to allocate bus resource: "
1977			    "ioport\n");
1978			return (ENXIO);
1979		}
1980		adapter->hw.io_base = 0;
1981		adapter->osdep.io_bus_space_tag =
1982		    rman_get_bustag(adapter->ioport);
1983		adapter->osdep.io_bus_space_handle =
1984		    rman_get_bushandle(adapter->ioport);
1985	}
1986
1987	adapter->hw.back = &adapter->osdep;
1988
1989	return (0);
1990}
1991
1992/*********************************************************************
1993 *
1994 *  Set up the MSI-X Interrupt handlers
1995 *
1996 **********************************************************************/
1997static int
1998em_if_msix_intr_assign(if_ctx_t ctx, int msix)
1999{
2000	struct adapter *adapter = iflib_get_softc(ctx);
2001	struct em_rx_queue *rx_que = adapter->rx_queues;
2002	struct em_tx_queue *tx_que = adapter->tx_queues;
2003	int error, rid, i, vector = 0, rx_vectors;
2004	char buf[16];
2005
2006	/* First set up ring resources */
2007	for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
2008		rid = vector + 1;
2009		snprintf(buf, sizeof(buf), "rxq%d", i);
2010		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, em_msix_que, rx_que, rx_que->me, buf);
2011		if (error) {
2012			device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
2013			adapter->rx_num_queues = i + 1;
2014			goto fail;
2015		}
2016
2017		rx_que->msix =  vector;
2018
2019		/*
2020		 * Set the bit to enable interrupt
2021		 * in E1000_IMS -- bits 20 and 21
2022		 * are for RX0 and RX1, note this has
2023		 * NOTHING to do with the MSI-X vector
2024		 */
2025		if (adapter->hw.mac.type == e1000_82574) {
2026			rx_que->eims = 1 << (20 + i);
2027			adapter->ims |= rx_que->eims;
2028			adapter->ivars |= (8 | rx_que->msix) << (i * 4);
2029		} else if (adapter->hw.mac.type == e1000_82575)
2030			rx_que->eims = E1000_EICR_TX_QUEUE0 << vector;
2031		else
2032			rx_que->eims = 1 << vector;
2033	}
2034	rx_vectors = vector;
2035
2036	vector = 0;
2037	for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) {
2038		snprintf(buf, sizeof(buf), "txq%d", i);
2039		tx_que = &adapter->tx_queues[i];
2040		iflib_softirq_alloc_generic(ctx,
2041		    &adapter->rx_queues[i % adapter->rx_num_queues].que_irq,
2042		    IFLIB_INTR_TX, tx_que, tx_que->me, buf);
2043
2044		tx_que->msix = (vector % adapter->rx_num_queues);
2045
2046		/*
2047		 * Set the bit to enable interrupt
2048		 * in E1000_IMS -- bits 22 and 23
2049		 * are for TX0 and TX1, note this has
2050		 * NOTHING to do with the MSI-X vector
2051		 */
2052		if (adapter->hw.mac.type == e1000_82574) {
2053			tx_que->eims = 1 << (22 + i);
2054			adapter->ims |= tx_que->eims;
2055			adapter->ivars |= (8 | tx_que->msix) << (8 + (i * 4));
2056		} else if (adapter->hw.mac.type == e1000_82575) {
2057			tx_que->eims = E1000_EICR_TX_QUEUE0 << i;
2058		} else {
2059			tx_que->eims = 1 << i;
2060		}
2061	}
2062
2063	/* Link interrupt */
2064	rid = rx_vectors + 1;
2065	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, adapter, 0, "aq");
2066
2067	if (error) {
2068		device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
2069		goto fail;
2070	}
2071	adapter->linkvec = rx_vectors;
2072	if (adapter->hw.mac.type < igb_mac_min) {
2073		adapter->ivars |=  (8 | rx_vectors) << 16;
2074		adapter->ivars |= 0x80000000;
2075	}
2076	return (0);
2077fail:
2078	iflib_irq_free(ctx, &adapter->irq);
2079	rx_que = adapter->rx_queues;
2080	for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++)
2081		iflib_irq_free(ctx, &rx_que->que_irq);
2082	return (error);
2083}
2084
2085static void
2086igb_configure_queues(struct adapter *adapter)
2087{
2088	struct e1000_hw *hw = &adapter->hw;
2089	struct em_rx_queue *rx_que;
2090	struct em_tx_queue *tx_que;
2091	u32 tmp, ivar = 0, newitr = 0;
2092
2093	/* First turn on RSS capability */
2094	if (adapter->hw.mac.type != e1000_82575)
2095		E1000_WRITE_REG(hw, E1000_GPIE,
2096		    E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
2097		    E1000_GPIE_PBA | E1000_GPIE_NSICR);
2098
2099	/* Turn on MSI-X */
2100	switch (adapter->hw.mac.type) {
2101	case e1000_82580:
2102	case e1000_i350:
2103	case e1000_i354:
2104	case e1000_i210:
2105	case e1000_i211:
2106	case e1000_vfadapt:
2107	case e1000_vfadapt_i350:
2108		/* RX entries */
2109		for (int i = 0; i < adapter->rx_num_queues; i++) {
2110			u32 index = i >> 1;
2111			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2112			rx_que = &adapter->rx_queues[i];
2113			if (i & 1) {
2114				ivar &= 0xFF00FFFF;
2115				ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
2116			} else {
2117				ivar &= 0xFFFFFF00;
2118				ivar |= rx_que->msix | E1000_IVAR_VALID;
2119			}
2120			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2121		}
2122		/* TX entries */
2123		for (int i = 0; i < adapter->tx_num_queues; i++) {
2124			u32 index = i >> 1;
2125			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2126			tx_que = &adapter->tx_queues[i];
2127			if (i & 1) {
2128				ivar &= 0x00FFFFFF;
2129				ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
2130			} else {
2131				ivar &= 0xFFFF00FF;
2132				ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
2133			}
2134			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2135			adapter->que_mask |= tx_que->eims;
2136		}
2137
2138		/* And for the link interrupt */
2139		ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
2140		adapter->link_mask = 1 << adapter->linkvec;
2141		E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
2142		break;
2143	case e1000_82576:
2144		/* RX entries */
2145		for (int i = 0; i < adapter->rx_num_queues; i++) {
2146			u32 index = i & 0x7; /* Each IVAR has two entries */
2147			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2148			rx_que = &adapter->rx_queues[i];
2149			if (i < 8) {
2150				ivar &= 0xFFFFFF00;
2151				ivar |= rx_que->msix | E1000_IVAR_VALID;
2152			} else {
2153				ivar &= 0xFF00FFFF;
2154				ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
2155			}
2156			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2157			adapter->que_mask |= rx_que->eims;
2158		}
2159		/* TX entries */
2160		for (int i = 0; i < adapter->tx_num_queues; i++) {
2161			u32 index = i & 0x7; /* Each IVAR has two entries */
2162			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2163			tx_que = &adapter->tx_queues[i];
2164			if (i < 8) {
2165				ivar &= 0xFFFF00FF;
2166				ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
2167			} else {
2168				ivar &= 0x00FFFFFF;
2169				ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
2170			}
2171			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2172			adapter->que_mask |= tx_que->eims;
2173		}
2174
2175		/* And for the link interrupt */
2176		ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
2177		adapter->link_mask = 1 << adapter->linkvec;
2178		E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
2179		break;
2180
2181	case e1000_82575:
2182		/* enable MSI-X support*/
2183		tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
2184		tmp |= E1000_CTRL_EXT_PBA_CLR;
2185		/* Auto-Mask interrupts upon ICR read. */
2186		tmp |= E1000_CTRL_EXT_EIAME;
2187		tmp |= E1000_CTRL_EXT_IRCA;
2188		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
2189
2190		/* Queues */
2191		for (int i = 0; i < adapter->rx_num_queues; i++) {
2192			rx_que = &adapter->rx_queues[i];
2193			tmp = E1000_EICR_RX_QUEUE0 << i;
2194			tmp |= E1000_EICR_TX_QUEUE0 << i;
2195			rx_que->eims = tmp;
2196			E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
2197			    i, rx_que->eims);
2198			adapter->que_mask |= rx_que->eims;
2199		}
2200
2201		/* Link */
2202		E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
2203		    E1000_EIMS_OTHER);
2204		adapter->link_mask |= E1000_EIMS_OTHER;
2205	default:
2206		break;
2207	}
2208
2209	/* Set the starting interrupt rate */
2210	if (em_max_interrupt_rate > 0)
2211		newitr = (4000000 / em_max_interrupt_rate) & 0x7FFC;
2212
2213	if (hw->mac.type == e1000_82575)
2214		newitr |= newitr << 16;
2215	else
2216		newitr |= E1000_EITR_CNT_IGNR;
2217
2218	for (int i = 0; i < adapter->rx_num_queues; i++) {
2219		rx_que = &adapter->rx_queues[i];
2220		E1000_WRITE_REG(hw, E1000_EITR(rx_que->msix), newitr);
2221	}
2222
2223	return;
2224}
2225
2226static void
2227em_free_pci_resources(if_ctx_t ctx)
2228{
2229	struct adapter *adapter = iflib_get_softc(ctx);
2230	struct em_rx_queue *que = adapter->rx_queues;
2231	device_t dev = iflib_get_dev(ctx);
2232
2233	/* Release all MSI-X queue resources */
2234	if (adapter->intr_type == IFLIB_INTR_MSIX)
2235		iflib_irq_free(ctx, &adapter->irq);
2236
2237	for (int i = 0; i < adapter->rx_num_queues; i++, que++) {
2238		iflib_irq_free(ctx, &que->que_irq);
2239	}
2240
2241	if (adapter->memory != NULL) {
2242		bus_release_resource(dev, SYS_RES_MEMORY,
2243		    rman_get_rid(adapter->memory), adapter->memory);
2244		adapter->memory = NULL;
2245	}
2246
2247	if (adapter->flash != NULL) {
2248		bus_release_resource(dev, SYS_RES_MEMORY,
2249		    rman_get_rid(adapter->flash), adapter->flash);
2250		adapter->flash = NULL;
2251	}
2252
2253	if (adapter->ioport != NULL) {
2254		bus_release_resource(dev, SYS_RES_IOPORT,
2255		    rman_get_rid(adapter->ioport), adapter->ioport);
2256		adapter->ioport = NULL;
2257	}
2258}
2259
2260/* Set up MSI or MSI-X */
2261static int
2262em_setup_msix(if_ctx_t ctx)
2263{
2264	struct adapter *adapter = iflib_get_softc(ctx);
2265
2266	if (adapter->hw.mac.type == e1000_82574) {
2267		em_enable_vectors_82574(ctx);
2268	}
2269	return (0);
2270}
2271
2272/*********************************************************************
2273 *
2274 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2275 *
2276 **********************************************************************/
2277static void
2278lem_smartspeed(struct adapter *adapter)
2279{
2280	u16 phy_tmp;
2281
2282	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2283	    adapter->hw.mac.autoneg == 0 ||
2284	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2285		return;
2286
2287	if (adapter->smartspeed == 0) {
2288		/* If Master/Slave config fault is asserted twice,
2289		 * we assume back-to-back */
2290		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2291		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2292			return;
2293		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2294		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2295			e1000_read_phy_reg(&adapter->hw,
2296			    PHY_1000T_CTRL, &phy_tmp);
2297			if(phy_tmp & CR_1000T_MS_ENABLE) {
2298				phy_tmp &= ~CR_1000T_MS_ENABLE;
2299				e1000_write_phy_reg(&adapter->hw,
2300				    PHY_1000T_CTRL, phy_tmp);
2301				adapter->smartspeed++;
2302				if(adapter->hw.mac.autoneg &&
2303				   !e1000_copper_link_autoneg(&adapter->hw) &&
2304				   !e1000_read_phy_reg(&adapter->hw,
2305				    PHY_CONTROL, &phy_tmp)) {
2306					phy_tmp |= (MII_CR_AUTO_NEG_EN |
2307						    MII_CR_RESTART_AUTO_NEG);
2308					e1000_write_phy_reg(&adapter->hw,
2309					    PHY_CONTROL, phy_tmp);
2310				}
2311			}
2312		}
2313		return;
2314	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2315		/* If still no link, perhaps using 2/3 pair cable */
2316		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2317		phy_tmp |= CR_1000T_MS_ENABLE;
2318		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2319		if(adapter->hw.mac.autoneg &&
2320		   !e1000_copper_link_autoneg(&adapter->hw) &&
2321		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2322			phy_tmp |= (MII_CR_AUTO_NEG_EN |
2323				    MII_CR_RESTART_AUTO_NEG);
2324			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2325		}
2326	}
2327	/* Restart process after EM_SMARTSPEED_MAX iterations */
2328	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2329		adapter->smartspeed = 0;
2330}
2331
2332/*********************************************************************
2333 *
2334 *  Initialize the DMA Coalescing feature
2335 *
2336 **********************************************************************/
2337static void
2338igb_init_dmac(struct adapter *adapter, u32 pba)
2339{
2340	device_t	dev = adapter->dev;
2341	struct e1000_hw *hw = &adapter->hw;
2342	u32 		dmac, reg = ~E1000_DMACR_DMAC_EN;
2343	u16		hwm;
2344	u16		max_frame_size;
2345
2346	if (hw->mac.type == e1000_i211)
2347		return;
2348
2349	max_frame_size = adapter->shared->isc_max_frame_size;
2350	if (hw->mac.type > e1000_82580) {
2351
2352		if (adapter->dmac == 0) { /* Disabling it */
2353			E1000_WRITE_REG(hw, E1000_DMACR, reg);
2354			return;
2355		} else
2356			device_printf(dev, "DMA Coalescing enabled\n");
2357
2358		/* Set starting threshold */
2359		E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
2360
2361		hwm = 64 * pba - max_frame_size / 16;
2362		if (hwm < 64 * (pba - 6))
2363			hwm = 64 * (pba - 6);
2364		reg = E1000_READ_REG(hw, E1000_FCRTC);
2365		reg &= ~E1000_FCRTC_RTH_COAL_MASK;
2366		reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
2367		    & E1000_FCRTC_RTH_COAL_MASK);
2368		E1000_WRITE_REG(hw, E1000_FCRTC, reg);
2369
2370
2371		dmac = pba - max_frame_size / 512;
2372		if (dmac < pba - 10)
2373			dmac = pba - 10;
2374		reg = E1000_READ_REG(hw, E1000_DMACR);
2375		reg &= ~E1000_DMACR_DMACTHR_MASK;
2376		reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT)
2377		    & E1000_DMACR_DMACTHR_MASK);
2378
2379		/* transition to L0x or L1 if available..*/
2380		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
2381
2382		/* Check if status is 2.5Gb backplane connection
2383		* before configuration of watchdog timer, which is
2384		* in msec values in 12.8usec intervals
2385		* watchdog timer= msec values in 32usec intervals
2386		* for non 2.5Gb connection
2387		*/
2388		if (hw->mac.type == e1000_i354) {
2389			int status = E1000_READ_REG(hw, E1000_STATUS);
2390			if ((status & E1000_STATUS_2P5_SKU) &&
2391			    (!(status & E1000_STATUS_2P5_SKU_OVER)))
2392				reg |= ((adapter->dmac * 5) >> 6);
2393			else
2394				reg |= (adapter->dmac >> 5);
2395		} else {
2396			reg |= (adapter->dmac >> 5);
2397		}
2398
2399		E1000_WRITE_REG(hw, E1000_DMACR, reg);
2400
2401		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
2402
2403		/* Set the interval before transition */
2404		reg = E1000_READ_REG(hw, E1000_DMCTLX);
2405		if (hw->mac.type == e1000_i350)
2406			reg |= IGB_DMCTLX_DCFLUSH_DIS;
2407		/*
2408		** in 2.5Gb connection, TTLX unit is 0.4 usec
2409		** which is 0x4*2 = 0xA. But delay is still 4 usec
2410		*/
2411		if (hw->mac.type == e1000_i354) {
2412			int status = E1000_READ_REG(hw, E1000_STATUS);
2413			if ((status & E1000_STATUS_2P5_SKU) &&
2414			    (!(status & E1000_STATUS_2P5_SKU_OVER)))
2415				reg |= 0xA;
2416			else
2417				reg |= 0x4;
2418		} else {
2419			reg |= 0x4;
2420		}
2421
2422		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
2423
2424		/* free space in tx packet buffer to wake from DMA coal */
2425		E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_TXPBSIZE -
2426		    (2 * max_frame_size)) >> 6);
2427
2428		/* make low power state decision controlled by DMA coal */
2429		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
2430		reg &= ~E1000_PCIEMISC_LX_DECISION;
2431		E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
2432
2433	} else if (hw->mac.type == e1000_82580) {
2434		u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
2435		E1000_WRITE_REG(hw, E1000_PCIEMISC,
2436		    reg & ~E1000_PCIEMISC_LX_DECISION);
2437		E1000_WRITE_REG(hw, E1000_DMACR, 0);
2438	}
2439}
2440
2441/*********************************************************************
2442 *
2443 *  Initialize the hardware to a configuration as specified by the
2444 *  adapter structure.
2445 *
2446 **********************************************************************/
2447static void
2448em_reset(if_ctx_t ctx)
2449{
2450	device_t dev = iflib_get_dev(ctx);
2451	struct adapter *adapter = iflib_get_softc(ctx);
2452	struct ifnet *ifp = iflib_get_ifp(ctx);
2453	struct e1000_hw *hw = &adapter->hw;
2454	u16 rx_buffer_size;
2455	u32 pba;
2456
2457	INIT_DEBUGOUT("em_reset: begin");
2458	/* Let the firmware know the OS is in control */
2459	em_get_hw_control(adapter);
2460
2461	/* Set up smart power down as default off on newer adapters. */
2462	if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 ||
2463	    hw->mac.type == e1000_82572)) {
2464		u16 phy_tmp = 0;
2465
2466		/* Speed up time to link by disabling smart power down. */
2467		e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
2468		phy_tmp &= ~IGP02E1000_PM_SPD;
2469		e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
2470	}
2471
2472	/*
2473	 * Packet Buffer Allocation (PBA)
2474	 * Writing PBA sets the receive portion of the buffer
2475	 * the remainder is used for the transmit buffer.
2476	 */
2477	switch (hw->mac.type) {
2478	/* Total Packet Buffer on these is 48K */
2479	case e1000_82571:
2480	case e1000_82572:
2481	case e1000_80003es2lan:
2482			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
2483		break;
2484	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
2485			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
2486		break;
2487	case e1000_82574:
2488	case e1000_82583:
2489			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
2490		break;
2491	case e1000_ich8lan:
2492		pba = E1000_PBA_8K;
2493		break;
2494	case e1000_ich9lan:
2495	case e1000_ich10lan:
2496		/* Boost Receive side for jumbo frames */
2497		if (adapter->hw.mac.max_frame_size > 4096)
2498			pba = E1000_PBA_14K;
2499		else
2500			pba = E1000_PBA_10K;
2501		break;
2502	case e1000_pchlan:
2503	case e1000_pch2lan:
2504	case e1000_pch_lpt:
2505	case e1000_pch_spt:
2506	case e1000_pch_cnp:
2507		pba = E1000_PBA_26K;
2508		break;
2509	case e1000_82575:
2510		pba = E1000_PBA_32K;
2511		break;
2512	case e1000_82576:
2513	case e1000_vfadapt:
2514		pba = E1000_READ_REG(hw, E1000_RXPBS);
2515		pba &= E1000_RXPBS_SIZE_MASK_82576;
2516		break;
2517	case e1000_82580:
2518	case e1000_i350:
2519	case e1000_i354:
2520	case e1000_vfadapt_i350:
2521		pba = E1000_READ_REG(hw, E1000_RXPBS);
2522		pba = e1000_rxpbs_adjust_82580(pba);
2523		break;
2524	case e1000_i210:
2525	case e1000_i211:
2526		pba = E1000_PBA_34K;
2527		break;
2528	default:
2529		if (adapter->hw.mac.max_frame_size > 8192)
2530			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
2531		else
2532			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
2533	}
2534
2535	/* Special needs in case of Jumbo frames */
2536	if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
2537		u32 tx_space, min_tx, min_rx;
2538		pba = E1000_READ_REG(hw, E1000_PBA);
2539		tx_space = pba >> 16;
2540		pba &= 0xffff;
2541		min_tx = (adapter->hw.mac.max_frame_size +
2542		    sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
2543		min_tx = roundup2(min_tx, 1024);
2544		min_tx >>= 10;
2545		min_rx = adapter->hw.mac.max_frame_size;
2546		min_rx = roundup2(min_rx, 1024);
2547		min_rx >>= 10;
2548		if (tx_space < min_tx &&
2549		    ((min_tx - tx_space) < pba)) {
2550			pba = pba - (min_tx - tx_space);
2551			/*
2552			 * if short on rx space, rx wins
2553			 * and must trump tx adjustment
2554			 */
2555			if (pba < min_rx)
2556				pba = min_rx;
2557		}
2558		E1000_WRITE_REG(hw, E1000_PBA, pba);
2559	}
2560
2561	if (hw->mac.type < igb_mac_min)
2562		E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
2563
2564	INIT_DEBUGOUT1("em_reset: pba=%dK",pba);
2565
2566	/*
2567	 * These parameters control the automatic generation (Tx) and
2568	 * response (Rx) to Ethernet PAUSE frames.
2569	 * - High water mark should allow for at least two frames to be
2570	 *   received after sending an XOFF.
2571	 * - Low water mark works best when it is very near the high water mark.
2572	 *   This allows the receiver to restart by sending XON when it has
2573	 *   drained a bit. Here we use an arbitrary value of 1500 which will
2574	 *   restart after one full frame is pulled from the buffer. There
2575	 *   could be several smaller frames in the buffer and if so they will
2576	 *   not trigger the XON until their total number reduces the buffer
2577	 *   by 1500.
2578	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2579	 */
2580	rx_buffer_size = (pba & 0xffff) << 10;
2581	hw->fc.high_water = rx_buffer_size -
2582	    roundup2(adapter->hw.mac.max_frame_size, 1024);
2583	hw->fc.low_water = hw->fc.high_water - 1500;
2584
2585	if (adapter->fc) /* locally set flow control value? */
2586		hw->fc.requested_mode = adapter->fc;
2587	else
2588		hw->fc.requested_mode = e1000_fc_full;
2589
2590	if (hw->mac.type == e1000_80003es2lan)
2591		hw->fc.pause_time = 0xFFFF;
2592	else
2593		hw->fc.pause_time = EM_FC_PAUSE_TIME;
2594
2595	hw->fc.send_xon = TRUE;
2596
2597	/* Device specific overrides/settings */
2598	switch (hw->mac.type) {
2599	case e1000_pchlan:
2600		/* Workaround: no TX flow ctrl for PCH */
2601		hw->fc.requested_mode = e1000_fc_rx_pause;
2602		hw->fc.pause_time = 0xFFFF; /* override */
2603		if (if_getmtu(ifp) > ETHERMTU) {
2604			hw->fc.high_water = 0x3500;
2605			hw->fc.low_water = 0x1500;
2606		} else {
2607			hw->fc.high_water = 0x5000;
2608			hw->fc.low_water = 0x3000;
2609		}
2610		hw->fc.refresh_time = 0x1000;
2611		break;
2612	case e1000_pch2lan:
2613	case e1000_pch_lpt:
2614	case e1000_pch_spt:
2615	case e1000_pch_cnp:
2616		hw->fc.high_water = 0x5C20;
2617		hw->fc.low_water = 0x5048;
2618		hw->fc.pause_time = 0x0650;
2619		hw->fc.refresh_time = 0x0400;
2620		/* Jumbos need adjusted PBA */
2621		if (if_getmtu(ifp) > ETHERMTU)
2622			E1000_WRITE_REG(hw, E1000_PBA, 12);
2623		else
2624			E1000_WRITE_REG(hw, E1000_PBA, 26);
2625		break;
2626	case e1000_82575:
2627	case e1000_82576:
2628		/* 8-byte granularity */
2629		hw->fc.low_water = hw->fc.high_water - 8;
2630		break;
2631	case e1000_82580:
2632	case e1000_i350:
2633	case e1000_i354:
2634	case e1000_i210:
2635	case e1000_i211:
2636	case e1000_vfadapt:
2637	case e1000_vfadapt_i350:
2638		/* 16-byte granularity */
2639		hw->fc.low_water = hw->fc.high_water - 16;
2640		break;
2641	case e1000_ich9lan:
2642	case e1000_ich10lan:
2643		if (if_getmtu(ifp) > ETHERMTU) {
2644			hw->fc.high_water = 0x2800;
2645			hw->fc.low_water = hw->fc.high_water - 8;
2646			break;
2647		}
2648		/* FALLTHROUGH */
2649	default:
2650		if (hw->mac.type == e1000_80003es2lan)
2651			hw->fc.pause_time = 0xFFFF;
2652		break;
2653	}
2654
2655	/* Issue a global reset */
2656	e1000_reset_hw(hw);
2657	if (adapter->hw.mac.type >= igb_mac_min) {
2658		E1000_WRITE_REG(hw, E1000_WUC, 0);
2659	} else {
2660		E1000_WRITE_REG(hw, E1000_WUFC, 0);
2661		em_disable_aspm(adapter);
2662	}
2663	if (adapter->flags & IGB_MEDIA_RESET) {
2664		e1000_setup_init_funcs(hw, TRUE);
2665		e1000_get_bus_info(hw);
2666		adapter->flags &= ~IGB_MEDIA_RESET;
2667	}
2668	/* and a re-init */
2669	if (e1000_init_hw(hw) < 0) {
2670		device_printf(dev, "Hardware Initialization Failed\n");
2671		return;
2672	}
2673	if (adapter->hw.mac.type >= igb_mac_min)
2674		igb_init_dmac(adapter, pba);
2675
2676	E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN);
2677	e1000_get_phy_info(hw);
2678	e1000_check_for_link(hw);
2679}
2680
2681/*
2682 * Initialise the RSS mapping for NICs that support multiple transmit/
2683 * receive rings.
2684 */
2685
2686#define RSSKEYLEN 10
2687static void
2688em_initialize_rss_mapping(struct adapter *adapter)
2689{
2690	uint8_t  rss_key[4 * RSSKEYLEN];
2691	uint32_t reta = 0;
2692	struct e1000_hw	*hw = &adapter->hw;
2693	int i;
2694
2695	/*
2696	 * Configure RSS key
2697	 */
2698	arc4rand(rss_key, sizeof(rss_key), 0);
2699	for (i = 0; i < RSSKEYLEN; ++i) {
2700		uint32_t rssrk = 0;
2701
2702		rssrk = EM_RSSRK_VAL(rss_key, i);
2703		E1000_WRITE_REG(hw,E1000_RSSRK(i), rssrk);
2704	}
2705
2706	/*
2707	 * Configure RSS redirect table in following fashion:
2708	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2709	 */
2710	for (i = 0; i < sizeof(reta); ++i) {
2711		uint32_t q;
2712
2713		q = (i % adapter->rx_num_queues) << 7;
2714		reta |= q << (8 * i);
2715	}
2716
2717	for (i = 0; i < 32; ++i)
2718		E1000_WRITE_REG(hw, E1000_RETA(i), reta);
2719
2720	E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q |
2721			E1000_MRQC_RSS_FIELD_IPV4_TCP |
2722			E1000_MRQC_RSS_FIELD_IPV4 |
2723			E1000_MRQC_RSS_FIELD_IPV6_TCP_EX |
2724			E1000_MRQC_RSS_FIELD_IPV6_EX |
2725			E1000_MRQC_RSS_FIELD_IPV6);
2726}
2727
2728static void
2729igb_initialize_rss_mapping(struct adapter *adapter)
2730{
2731	struct e1000_hw *hw = &adapter->hw;
2732	int i;
2733	int queue_id;
2734	u32 reta;
2735	u32 rss_key[10], mrqc, shift = 0;
2736
2737	/* XXX? */
2738	if (adapter->hw.mac.type == e1000_82575)
2739		shift = 6;
2740
2741	/*
2742	 * The redirection table controls which destination
2743	 * queue each bucket redirects traffic to.
2744	 * Each DWORD represents four queues, with the LSB
2745	 * being the first queue in the DWORD.
2746	 *
2747	 * This just allocates buckets to queues using round-robin
2748	 * allocation.
2749	 *
2750	 * NOTE: It Just Happens to line up with the default
2751	 * RSS allocation method.
2752	 */
2753
2754	/* Warning FM follows */
2755	reta = 0;
2756	for (i = 0; i < 128; i++) {
2757#ifdef RSS
2758		queue_id = rss_get_indirection_to_bucket(i);
2759		/*
2760		 * If we have more queues than buckets, we'll
2761		 * end up mapping buckets to a subset of the
2762		 * queues.
2763		 *
2764		 * If we have more buckets than queues, we'll
2765		 * end up instead assigning multiple buckets
2766		 * to queues.
2767		 *
2768		 * Both are suboptimal, but we need to handle
2769		 * the case so we don't go out of bounds
2770		 * indexing arrays and such.
2771		 */
2772		queue_id = queue_id % adapter->rx_num_queues;
2773#else
2774		queue_id = (i % adapter->rx_num_queues);
2775#endif
2776		/* Adjust if required */
2777		queue_id = queue_id << shift;
2778
2779		/*
2780		 * The low 8 bits are for hash value (n+0);
2781		 * The next 8 bits are for hash value (n+1), etc.
2782		 */
2783		reta = reta >> 8;
2784		reta = reta | ( ((uint32_t) queue_id) << 24);
2785		if ((i & 3) == 3) {
2786			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
2787			reta = 0;
2788		}
2789	}
2790
2791	/* Now fill in hash table */
2792
2793	/*
2794	 * MRQC: Multiple Receive Queues Command
2795	 * Set queuing to RSS control, number depends on the device.
2796	 */
2797	mrqc = E1000_MRQC_ENABLE_RSS_8Q;
2798
2799#ifdef RSS
2800	/* XXX ew typecasting */
2801	rss_getkey((uint8_t *) &rss_key);
2802#else
2803	arc4rand(&rss_key, sizeof(rss_key), 0);
2804#endif
2805	for (i = 0; i < 10; i++)
2806		E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key[i]);
2807
2808	/*
2809	 * Configure the RSS fields to hash upon.
2810	 */
2811	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2812	    E1000_MRQC_RSS_FIELD_IPV4_TCP);
2813	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2814	    E1000_MRQC_RSS_FIELD_IPV6_TCP);
2815	mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP |
2816	    E1000_MRQC_RSS_FIELD_IPV6_UDP);
2817	mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2818	    E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2819
2820	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2821}
2822
2823/*********************************************************************
2824 *
2825 *  Setup networking device structure and register interface media.
2826 *
2827 **********************************************************************/
2828static int
2829em_setup_interface(if_ctx_t ctx)
2830{
2831	struct ifnet *ifp = iflib_get_ifp(ctx);
2832	struct adapter *adapter = iflib_get_softc(ctx);
2833	if_softc_ctx_t scctx = adapter->shared;
2834
2835	INIT_DEBUGOUT("em_setup_interface: begin");
2836
2837	/* Single Queue */
2838	if (adapter->tx_num_queues == 1) {
2839		if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
2840		if_setsendqready(ifp);
2841	}
2842
2843	/*
2844	 * Specify the media types supported by this adapter and register
2845	 * callbacks to update media and link information
2846	 */
2847	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2848	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2849		u_char fiber_type = IFM_1000_SX;	/* default type */
2850
2851		if (adapter->hw.mac.type == e1000_82545)
2852			fiber_type = IFM_1000_LX;
2853		ifmedia_add(adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL);
2854		ifmedia_add(adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2855	} else {
2856		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2857		ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2858		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2859		ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2860		if (adapter->hw.phy.type != e1000_phy_ife) {
2861			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2862			ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2863		}
2864	}
2865	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2866	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
2867	return (0);
2868}
2869
2870static int
2871em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
2872{
2873	struct adapter *adapter = iflib_get_softc(ctx);
2874	if_softc_ctx_t scctx = adapter->shared;
2875	int error = E1000_SUCCESS;
2876	struct em_tx_queue *que;
2877	int i, j;
2878
2879	MPASS(adapter->tx_num_queues > 0);
2880	MPASS(adapter->tx_num_queues == ntxqsets);
2881
2882	/* First allocate the top level queue structs */
2883	if (!(adapter->tx_queues =
2884	    (struct em_tx_queue *) malloc(sizeof(struct em_tx_queue) *
2885	    adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2886		device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
2887		return(ENOMEM);
2888	}
2889
2890	for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) {
2891		/* Set up some basics */
2892
2893		struct tx_ring *txr = &que->txr;
2894		txr->adapter = que->adapter = adapter;
2895		que->me = txr->me =  i;
2896
2897		/* Allocate report status array */
2898		if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
2899			device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n");
2900			error = ENOMEM;
2901			goto fail;
2902		}
2903		for (j = 0; j < scctx->isc_ntxd[0]; j++)
2904			txr->tx_rsq[j] = QIDX_INVALID;
2905		/* get the virtual and physical address of the hardware queues */
2906		txr->tx_base = (struct e1000_tx_desc *)vaddrs[i*ntxqs];
2907		txr->tx_paddr = paddrs[i*ntxqs];
2908	}
2909
2910	if (bootverbose)
2911		device_printf(iflib_get_dev(ctx),
2912		    "allocated for %d tx_queues\n", adapter->tx_num_queues);
2913	return (0);
2914fail:
2915	em_if_queues_free(ctx);
2916	return (error);
2917}
2918
2919static int
2920em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
2921{
2922	struct adapter *adapter = iflib_get_softc(ctx);
2923	int error = E1000_SUCCESS;
2924	struct em_rx_queue *que;
2925	int i;
2926
2927	MPASS(adapter->rx_num_queues > 0);
2928	MPASS(adapter->rx_num_queues == nrxqsets);
2929
2930	/* First allocate the top level queue structs */
2931	if (!(adapter->rx_queues =
2932	    (struct em_rx_queue *) malloc(sizeof(struct em_rx_queue) *
2933	    adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2934		device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
2935		error = ENOMEM;
2936		goto fail;
2937	}
2938
2939	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
2940		/* Set up some basics */
2941		struct rx_ring *rxr = &que->rxr;
2942		rxr->adapter = que->adapter = adapter;
2943		rxr->que = que;
2944		que->me = rxr->me =  i;
2945
2946		/* get the virtual and physical address of the hardware queues */
2947		rxr->rx_base = (union e1000_rx_desc_extended *)vaddrs[i*nrxqs];
2948		rxr->rx_paddr = paddrs[i*nrxqs];
2949	}
2950
2951	if (bootverbose)
2952		device_printf(iflib_get_dev(ctx),
2953		    "allocated for %d rx_queues\n", adapter->rx_num_queues);
2954
2955	return (0);
2956fail:
2957	em_if_queues_free(ctx);
2958	return (error);
2959}
2960
2961static void
2962em_if_queues_free(if_ctx_t ctx)
2963{
2964	struct adapter *adapter = iflib_get_softc(ctx);
2965	struct em_tx_queue *tx_que = adapter->tx_queues;
2966	struct em_rx_queue *rx_que = adapter->rx_queues;
2967
2968	if (tx_que != NULL) {
2969		for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
2970			struct tx_ring *txr = &tx_que->txr;
2971			if (txr->tx_rsq == NULL)
2972				break;
2973
2974			free(txr->tx_rsq, M_DEVBUF);
2975			txr->tx_rsq = NULL;
2976		}
2977		free(adapter->tx_queues, M_DEVBUF);
2978		adapter->tx_queues = NULL;
2979	}
2980
2981	if (rx_que != NULL) {
2982		free(adapter->rx_queues, M_DEVBUF);
2983		adapter->rx_queues = NULL;
2984	}
2985
2986	em_release_hw_control(adapter);
2987
2988	if (adapter->mta != NULL) {
2989		free(adapter->mta, M_DEVBUF);
2990	}
2991}
2992
2993/*********************************************************************
2994 *
2995 *  Enable transmit unit.
2996 *
2997 **********************************************************************/
2998static void
2999em_initialize_transmit_unit(if_ctx_t ctx)
3000{
3001	struct adapter *adapter = iflib_get_softc(ctx);
3002	if_softc_ctx_t scctx = adapter->shared;
3003	struct em_tx_queue *que;
3004	struct tx_ring	*txr;
3005	struct e1000_hw	*hw = &adapter->hw;
3006	u32 tctl, txdctl = 0, tarc, tipg = 0;
3007
3008	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3009
3010	for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
3011		u64 bus_addr;
3012		caddr_t offp, endp;
3013
3014		que = &adapter->tx_queues[i];
3015		txr = &que->txr;
3016		bus_addr = txr->tx_paddr;
3017
3018		/* Clear checksum offload context. */
3019		offp = (caddr_t)&txr->csum_flags;
3020		endp = (caddr_t)(txr + 1);
3021		bzero(offp, endp - offp);
3022
3023		/* Base and Len of TX Ring */
3024		E1000_WRITE_REG(hw, E1000_TDLEN(i),
3025		    scctx->isc_ntxd[0] * sizeof(struct e1000_tx_desc));
3026		E1000_WRITE_REG(hw, E1000_TDBAH(i),
3027		    (u32)(bus_addr >> 32));
3028		E1000_WRITE_REG(hw, E1000_TDBAL(i),
3029		    (u32)bus_addr);
3030		/* Init the HEAD/TAIL indices */
3031		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
3032		E1000_WRITE_REG(hw, E1000_TDH(i), 0);
3033
3034		HW_DEBUGOUT2("Base = %x, Length = %x\n",
3035		    E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
3036		    E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
3037
3038		txdctl = 0; /* clear txdctl */
3039		txdctl |= 0x1f; /* PTHRESH */
3040		txdctl |= 1 << 8; /* HTHRESH */
3041		txdctl |= 1 << 16;/* WTHRESH */
3042		txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
3043		txdctl |= E1000_TXDCTL_GRAN;
3044		txdctl |= 1 << 25; /* LWTHRESH */
3045
3046		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
3047	}
3048
3049	/* Set the default values for the Tx Inter Packet Gap timer */
3050	switch (adapter->hw.mac.type) {
3051	case e1000_80003es2lan:
3052		tipg = DEFAULT_82543_TIPG_IPGR1;
3053		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3054		    E1000_TIPG_IPGR2_SHIFT;
3055		break;
3056	case e1000_82542:
3057		tipg = DEFAULT_82542_TIPG_IPGT;
3058		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3059		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3060		break;
3061	default:
3062		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3063		    (adapter->hw.phy.media_type ==
3064		    e1000_media_type_internal_serdes))
3065			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3066		else
3067			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3068		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3069		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3070	}
3071
3072	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
3073	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
3074
3075	if(adapter->hw.mac.type >= e1000_82540)
3076		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3077		    adapter->tx_abs_int_delay.value);
3078
3079	if ((adapter->hw.mac.type == e1000_82571) ||
3080	    (adapter->hw.mac.type == e1000_82572)) {
3081		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3082		tarc |= TARC_SPEED_MODE_BIT;
3083		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3084	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
3085		/* errata: program both queues to unweighted RR */
3086		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3087		tarc |= 1;
3088		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3089		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3090		tarc |= 1;
3091		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3092	} else if (adapter->hw.mac.type == e1000_82574) {
3093		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3094		tarc |= TARC_ERRATA_BIT;
3095		if ( adapter->tx_num_queues > 1) {
3096			tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX);
3097			E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3098			E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3099		} else
3100			E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3101	}
3102
3103	if (adapter->tx_int_delay.value > 0)
3104		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3105
3106	/* Program the Transmit Control Register */
3107	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3108	tctl &= ~E1000_TCTL_CT;
3109	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3110		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3111
3112	if (adapter->hw.mac.type >= e1000_82571)
3113		tctl |= E1000_TCTL_MULR;
3114
3115	/* This write will effectively turn on the transmit unit. */
3116	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3117
3118	/* SPT and KBL errata workarounds */
3119	if (hw->mac.type == e1000_pch_spt) {
3120		u32 reg;
3121		reg = E1000_READ_REG(hw, E1000_IOSFPC);
3122		reg |= E1000_RCTL_RDMTS_HEX;
3123		E1000_WRITE_REG(hw, E1000_IOSFPC, reg);
3124		/* i218-i219 Specification Update 1.5.4.5 */
3125		reg = E1000_READ_REG(hw, E1000_TARC(0));
3126		reg &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
3127		reg |= E1000_TARC0_CB_MULTIQ_2_REQ;
3128		E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3129	}
3130}
3131
3132/*********************************************************************
3133 *
3134 *  Enable receive unit.
3135 *
3136 **********************************************************************/
3137
3138static void
3139em_initialize_receive_unit(if_ctx_t ctx)
3140{
3141	struct adapter *adapter = iflib_get_softc(ctx);
3142	if_softc_ctx_t scctx = adapter->shared;
3143	struct ifnet *ifp = iflib_get_ifp(ctx);
3144	struct e1000_hw	*hw = &adapter->hw;
3145	struct em_rx_queue *que;
3146	int i;
3147	u32 rctl, rxcsum, rfctl;
3148
3149	INIT_DEBUGOUT("em_initialize_receive_units: begin");
3150
3151	/*
3152	 * Make sure receives are disabled while setting
3153	 * up the descriptor ring
3154	 */
3155	rctl = E1000_READ_REG(hw, E1000_RCTL);
3156	/* Do not disable if ever enabled on this hardware */
3157	if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
3158		E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3159
3160	/* Setup the Receive Control Register */
3161	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3162	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3163	    E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3164	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3165
3166	/* Do not store bad packets */
3167	rctl &= ~E1000_RCTL_SBP;
3168
3169	/* Enable Long Packet receive */
3170	if (if_getmtu(ifp) > ETHERMTU)
3171		rctl |= E1000_RCTL_LPE;
3172	else
3173		rctl &= ~E1000_RCTL_LPE;
3174
3175	/* Strip the CRC */
3176	if (!em_disable_crc_stripping)
3177		rctl |= E1000_RCTL_SECRC;
3178
3179	if (adapter->hw.mac.type >= e1000_82540) {
3180		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3181			    adapter->rx_abs_int_delay.value);
3182
3183		/*
3184		 * Set the interrupt throttling rate. Value is calculated
3185		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3186		 */
3187		E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
3188	}
3189	E1000_WRITE_REG(&adapter->hw, E1000_RDTR,
3190	    adapter->rx_int_delay.value);
3191
3192	/* Use extended rx descriptor formats */
3193	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3194	rfctl |= E1000_RFCTL_EXTEN;
3195	/*
3196	 * When using MSI-X interrupts we need to throttle
3197	 * using the EITR register (82574 only)
3198	 */
3199	if (hw->mac.type == e1000_82574) {
3200		for (int i = 0; i < 4; i++)
3201			E1000_WRITE_REG(hw, E1000_EITR_82574(i),
3202			    DEFAULT_ITR);
3203		/* Disable accelerated acknowledge */
3204		rfctl |= E1000_RFCTL_ACK_DIS;
3205	}
3206	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
3207
3208	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
3209	if (if_getcapenable(ifp) & IFCAP_RXCSUM &&
3210	    adapter->hw.mac.type >= e1000_82543) {
3211		if (adapter->tx_num_queues > 1) {
3212			if (adapter->hw.mac.type >= igb_mac_min) {
3213				rxcsum |= E1000_RXCSUM_PCSD;
3214				if (hw->mac.type != e1000_82575)
3215					rxcsum |= E1000_RXCSUM_CRCOFL;
3216			} else
3217				rxcsum |= E1000_RXCSUM_TUOFL |
3218					E1000_RXCSUM_IPOFL |
3219					E1000_RXCSUM_PCSD;
3220		} else {
3221			if (adapter->hw.mac.type >= igb_mac_min)
3222				rxcsum |= E1000_RXCSUM_IPPCSE;
3223			else
3224				rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL;
3225			if (adapter->hw.mac.type > e1000_82575)
3226				rxcsum |= E1000_RXCSUM_CRCOFL;
3227		}
3228	} else
3229		rxcsum &= ~E1000_RXCSUM_TUOFL;
3230
3231	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
3232
3233	if (adapter->rx_num_queues > 1) {
3234		if (adapter->hw.mac.type >= igb_mac_min)
3235			igb_initialize_rss_mapping(adapter);
3236		else
3237			em_initialize_rss_mapping(adapter);
3238	}
3239
3240	/*
3241	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3242	 * long latencies are observed, like Lenovo X60. This
3243	 * change eliminates the problem, but since having positive
3244	 * values in RDTR is a known source of problems on other
3245	 * platforms another solution is being sought.
3246	 */
3247	if (hw->mac.type == e1000_82573)
3248		E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
3249
3250	for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
3251		struct rx_ring *rxr = &que->rxr;
3252		/* Setup the Base and Length of the Rx Descriptor Ring */
3253		u64 bus_addr = rxr->rx_paddr;
3254#if 0
3255		u32 rdt = adapter->rx_num_queues -1;  /* default */
3256#endif
3257
3258		E1000_WRITE_REG(hw, E1000_RDLEN(i),
3259		    scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended));
3260		E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
3261		E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
3262		/* Setup the Head and Tail Descriptor Pointers */
3263		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
3264		E1000_WRITE_REG(hw, E1000_RDT(i), 0);
3265	}
3266
3267	/*
3268	 * Set PTHRESH for improved jumbo performance
3269	 * According to 10.2.5.11 of Intel 82574 Datasheet,
3270	 * RXDCTL(1) is written whenever RXDCTL(0) is written.
3271	 * Only write to RXDCTL(1) if there is a need for different
3272	 * settings.
3273	 */
3274
3275	if (((adapter->hw.mac.type == e1000_ich9lan) ||
3276	    (adapter->hw.mac.type == e1000_pch2lan) ||
3277	    (adapter->hw.mac.type == e1000_ich10lan)) &&
3278	    (if_getmtu(ifp) > ETHERMTU)) {
3279		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
3280		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
3281	} else if (adapter->hw.mac.type == e1000_82574) {
3282		for (int i = 0; i < adapter->rx_num_queues; i++) {
3283			u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
3284			rxdctl |= 0x20; /* PTHRESH */
3285			rxdctl |= 4 << 8; /* HTHRESH */
3286			rxdctl |= 4 << 16;/* WTHRESH */
3287			rxdctl |= 1 << 24; /* Switch to granularity */
3288			E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
3289		}
3290	} else if (adapter->hw.mac.type >= igb_mac_min) {
3291		u32 psize, srrctl = 0;
3292
3293		if (if_getmtu(ifp) > ETHERMTU) {
3294			/* Set maximum packet len */
3295			if (adapter->rx_mbuf_sz <= 4096) {
3296				srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3297				rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
3298			} else if (adapter->rx_mbuf_sz > 4096) {
3299				srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3300				rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
3301			}
3302			psize = scctx->isc_max_frame_size;
3303			/* are we on a vlan? */
3304			if (ifp->if_vlantrunk != NULL)
3305				psize += VLAN_TAG_SIZE;
3306			E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
3307		} else {
3308			srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3309			rctl |= E1000_RCTL_SZ_2048;
3310		}
3311
3312		/*
3313		 * If TX flow control is disabled and there's >1 queue defined,
3314		 * enable DROP.
3315		 *
3316		 * This drops frames rather than hanging the RX MAC for all queues.
3317		 */
3318		if ((adapter->rx_num_queues > 1) &&
3319		    (adapter->fc == e1000_fc_none ||
3320		     adapter->fc == e1000_fc_rx_pause)) {
3321			srrctl |= E1000_SRRCTL_DROP_EN;
3322		}
3323			/* Setup the Base and Length of the Rx Descriptor Rings */
3324		for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
3325			struct rx_ring *rxr = &que->rxr;
3326			u64 bus_addr = rxr->rx_paddr;
3327			u32 rxdctl;
3328
3329#ifdef notyet
3330			/* Configure for header split? -- ignore for now */
3331			rxr->hdr_split = igb_header_split;
3332#else
3333			srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3334#endif
3335
3336			E1000_WRITE_REG(hw, E1000_RDLEN(i),
3337					scctx->isc_nrxd[0] * sizeof(struct e1000_rx_desc));
3338			E1000_WRITE_REG(hw, E1000_RDBAH(i),
3339					(uint32_t)(bus_addr >> 32));
3340			E1000_WRITE_REG(hw, E1000_RDBAL(i),
3341					(uint32_t)bus_addr);
3342			E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
3343			/* Enable this Queue */
3344			rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
3345			rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3346			rxdctl &= 0xFFF00000;
3347			rxdctl |= IGB_RX_PTHRESH;
3348			rxdctl |= IGB_RX_HTHRESH << 8;
3349			rxdctl |= IGB_RX_WTHRESH << 16;
3350			E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
3351		}
3352	} else if (adapter->hw.mac.type >= e1000_pch2lan) {
3353		if (if_getmtu(ifp) > ETHERMTU)
3354			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
3355		else
3356			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
3357	}
3358
3359	/* Make sure VLAN Filters are off */
3360	rctl &= ~E1000_RCTL_VFE;
3361
3362	if (adapter->hw.mac.type < igb_mac_min) {
3363		if (adapter->rx_mbuf_sz == MCLBYTES)
3364			rctl |= E1000_RCTL_SZ_2048;
3365		else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
3366			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
3367		else if (adapter->rx_mbuf_sz > MJUMPAGESIZE)
3368			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
3369
3370		/* ensure we clear use DTYPE of 00 here */
3371		rctl &= ~0x00000C00;
3372	}
3373
3374	/* Write out the settings */
3375	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3376
3377	return;
3378}
3379
3380static void
3381em_if_vlan_register(if_ctx_t ctx, u16 vtag)
3382{
3383	struct adapter *adapter = iflib_get_softc(ctx);
3384	u32 index, bit;
3385
3386	index = (vtag >> 5) & 0x7F;
3387	bit = vtag & 0x1F;
3388	adapter->shadow_vfta[index] |= (1 << bit);
3389	++adapter->num_vlans;
3390}
3391
3392static void
3393em_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
3394{
3395	struct adapter *adapter = iflib_get_softc(ctx);
3396	u32 index, bit;
3397
3398	index = (vtag >> 5) & 0x7F;
3399	bit = vtag & 0x1F;
3400	adapter->shadow_vfta[index] &= ~(1 << bit);
3401	--adapter->num_vlans;
3402}
3403
3404static void
3405em_setup_vlan_hw_support(struct adapter *adapter)
3406{
3407	struct e1000_hw *hw = &adapter->hw;
3408	u32 reg;
3409
3410	/*
3411	 * We get here thru init_locked, meaning
3412	 * a soft reset, this has already cleared
3413	 * the VFTA and other state, so if there
3414	 * have been no vlan's registered do nothing.
3415	 */
3416	if (adapter->num_vlans == 0)
3417		return;
3418
3419	/*
3420	 * A soft reset zero's out the VFTA, so
3421	 * we need to repopulate it now.
3422	 */
3423	for (int i = 0; i < EM_VFTA_SIZE; i++)
3424		if (adapter->shadow_vfta[i] != 0)
3425			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3426			    i, adapter->shadow_vfta[i]);
3427
3428	reg = E1000_READ_REG(hw, E1000_CTRL);
3429	reg |= E1000_CTRL_VME;
3430	E1000_WRITE_REG(hw, E1000_CTRL, reg);
3431
3432	/* Enable the Filter Table */
3433	reg = E1000_READ_REG(hw, E1000_RCTL);
3434	reg &= ~E1000_RCTL_CFIEN;
3435	reg |= E1000_RCTL_VFE;
3436	E1000_WRITE_REG(hw, E1000_RCTL, reg);
3437}
3438
3439static void
3440em_if_intr_enable(if_ctx_t ctx)
3441{
3442	struct adapter *adapter = iflib_get_softc(ctx);
3443	struct e1000_hw *hw = &adapter->hw;
3444	u32 ims_mask = IMS_ENABLE_MASK;
3445
3446	if (hw->mac.type == e1000_82574) {
3447		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3448		ims_mask |= adapter->ims;
3449	}
3450	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3451}
3452
3453static void
3454em_if_intr_disable(if_ctx_t ctx)
3455{
3456	struct adapter *adapter = iflib_get_softc(ctx);
3457	struct e1000_hw *hw = &adapter->hw;
3458
3459	if (hw->mac.type == e1000_82574)
3460		E1000_WRITE_REG(hw, EM_EIAC, 0);
3461	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3462}
3463
3464static void
3465igb_if_intr_enable(if_ctx_t ctx)
3466{
3467	struct adapter *adapter = iflib_get_softc(ctx);
3468	struct e1000_hw *hw = &adapter->hw;
3469	u32 mask;
3470
3471	if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) {
3472		mask = (adapter->que_mask | adapter->link_mask);
3473		E1000_WRITE_REG(hw, E1000_EIAC, mask);
3474		E1000_WRITE_REG(hw, E1000_EIAM, mask);
3475		E1000_WRITE_REG(hw, E1000_EIMS, mask);
3476		E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC);
3477	} else
3478		E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
3479	E1000_WRITE_FLUSH(hw);
3480}
3481
3482static void
3483igb_if_intr_disable(if_ctx_t ctx)
3484{
3485	struct adapter *adapter = iflib_get_softc(ctx);
3486	struct e1000_hw *hw = &adapter->hw;
3487
3488	if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) {
3489		E1000_WRITE_REG(hw, E1000_EIMC, 0xffffffff);
3490		E1000_WRITE_REG(hw, E1000_EIAC, 0);
3491	}
3492	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3493	E1000_WRITE_FLUSH(hw);
3494}
3495
3496/*
3497 * Bit of a misnomer, what this really means is
3498 * to enable OS management of the system... aka
3499 * to disable special hardware management features
3500 */
3501static void
3502em_init_manageability(struct adapter *adapter)
3503{
3504	/* A shared code workaround */
3505#define E1000_82542_MANC2H E1000_MANC2H
3506	if (adapter->has_manage) {
3507		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
3508		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3509
3510		/* disable hardware interception of ARP */
3511		manc &= ~(E1000_MANC_ARP_EN);
3512
3513		/* enable receiving management packets to the host */
3514		manc |= E1000_MANC_EN_MNG2HOST;
3515#define E1000_MNG2HOST_PORT_623 (1 << 5)
3516#define E1000_MNG2HOST_PORT_664 (1 << 6)
3517		manc2h |= E1000_MNG2HOST_PORT_623;
3518		manc2h |= E1000_MNG2HOST_PORT_664;
3519		E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
3520		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3521	}
3522}
3523
3524/*
3525 * Give control back to hardware management
3526 * controller if there is one.
3527 */
3528static void
3529em_release_manageability(struct adapter *adapter)
3530{
3531	if (adapter->has_manage) {
3532		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3533
3534		/* re-enable hardware interception of ARP */
3535		manc |= E1000_MANC_ARP_EN;
3536		manc &= ~E1000_MANC_EN_MNG2HOST;
3537
3538		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3539	}
3540}
3541
3542/*
3543 * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3544 * For ASF and Pass Through versions of f/w this means
3545 * that the driver is loaded. For AMT version type f/w
3546 * this means that the network i/f is open.
3547 */
3548static void
3549em_get_hw_control(struct adapter *adapter)
3550{
3551	u32 ctrl_ext, swsm;
3552
3553	if (adapter->vf_ifp)
3554		return;
3555
3556	if (adapter->hw.mac.type == e1000_82573) {
3557		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
3558		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
3559		    swsm | E1000_SWSM_DRV_LOAD);
3560		return;
3561	}
3562	/* else */
3563	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3564	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3565	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3566}
3567
3568/*
3569 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3570 * For ASF and Pass Through versions of f/w this means that
3571 * the driver is no longer loaded. For AMT versions of the
3572 * f/w this means that the network i/f is closed.
3573 */
3574static void
3575em_release_hw_control(struct adapter *adapter)
3576{
3577	u32 ctrl_ext, swsm;
3578
3579	if (!adapter->has_manage)
3580		return;
3581
3582	if (adapter->hw.mac.type == e1000_82573) {
3583		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
3584		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
3585		    swsm & ~E1000_SWSM_DRV_LOAD);
3586		return;
3587	}
3588	/* else */
3589	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3590	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3591	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3592	return;
3593}
3594
3595static int
3596em_is_valid_ether_addr(u8 *addr)
3597{
3598	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3599
3600	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3601		return (FALSE);
3602	}
3603
3604	return (TRUE);
3605}
3606
3607/*
3608** Parse the interface capabilities with regard
3609** to both system management and wake-on-lan for
3610** later use.
3611*/
3612static void
3613em_get_wakeup(if_ctx_t ctx)
3614{
3615	struct adapter *adapter = iflib_get_softc(ctx);
3616	device_t dev = iflib_get_dev(ctx);
3617	u16 eeprom_data = 0, device_id, apme_mask;
3618
3619	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3620	apme_mask = EM_EEPROM_APME;
3621
3622	switch (adapter->hw.mac.type) {
3623	case e1000_82542:
3624	case e1000_82543:
3625		break;
3626	case e1000_82544:
3627		e1000_read_nvm(&adapter->hw,
3628		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3629		apme_mask = EM_82544_APME;
3630		break;
3631	case e1000_82546:
3632	case e1000_82546_rev_3:
3633		if (adapter->hw.bus.func == 1) {
3634			e1000_read_nvm(&adapter->hw,
3635			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3636			break;
3637		} else
3638			e1000_read_nvm(&adapter->hw,
3639			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3640		break;
3641	case e1000_82573:
3642	case e1000_82583:
3643		adapter->has_amt = TRUE;
3644		/* FALLTHROUGH */
3645	case e1000_82571:
3646	case e1000_82572:
3647	case e1000_80003es2lan:
3648		if (adapter->hw.bus.func == 1) {
3649			e1000_read_nvm(&adapter->hw,
3650			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3651			break;
3652		} else
3653			e1000_read_nvm(&adapter->hw,
3654			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3655		break;
3656	case e1000_ich8lan:
3657	case e1000_ich9lan:
3658	case e1000_ich10lan:
3659	case e1000_pchlan:
3660	case e1000_pch2lan:
3661	case e1000_pch_lpt:
3662	case e1000_pch_spt:
3663	case e1000_82575:	/* listing all igb devices */
3664	case e1000_82576:
3665	case e1000_82580:
3666	case e1000_i350:
3667	case e1000_i354:
3668	case e1000_i210:
3669	case e1000_i211:
3670	case e1000_vfadapt:
3671	case e1000_vfadapt_i350:
3672		apme_mask = E1000_WUC_APME;
3673		adapter->has_amt = TRUE;
3674		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
3675		break;
3676	default:
3677		e1000_read_nvm(&adapter->hw,
3678		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3679		break;
3680	}
3681	if (eeprom_data & apme_mask)
3682		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3683	/*
3684	 * We have the eeprom settings, now apply the special cases
3685	 * where the eeprom may be wrong or the board won't support
3686	 * wake on lan on a particular port
3687	 */
3688	device_id = pci_get_device(dev);
3689	switch (device_id) {
3690	case E1000_DEV_ID_82546GB_PCIE:
3691		adapter->wol = 0;
3692		break;
3693	case E1000_DEV_ID_82546EB_FIBER:
3694	case E1000_DEV_ID_82546GB_FIBER:
3695		/* Wake events only supported on port A for dual fiber
3696		 * regardless of eeprom setting */
3697		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3698		    E1000_STATUS_FUNC_1)
3699			adapter->wol = 0;
3700		break;
3701	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3702		/* if quad port adapter, disable WoL on all but port A */
3703		if (global_quad_port_a != 0)
3704			adapter->wol = 0;
3705		/* Reset for multiple quad port adapters */
3706		if (++global_quad_port_a == 4)
3707			global_quad_port_a = 0;
3708		break;
3709	case E1000_DEV_ID_82571EB_FIBER:
3710		/* Wake events only supported on port A for dual fiber
3711		 * regardless of eeprom setting */
3712		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3713		    E1000_STATUS_FUNC_1)
3714			adapter->wol = 0;
3715		break;
3716	case E1000_DEV_ID_82571EB_QUAD_COPPER:
3717	case E1000_DEV_ID_82571EB_QUAD_FIBER:
3718	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
3719		/* if quad port adapter, disable WoL on all but port A */
3720		if (global_quad_port_a != 0)
3721			adapter->wol = 0;
3722		/* Reset for multiple quad port adapters */
3723		if (++global_quad_port_a == 4)
3724			global_quad_port_a = 0;
3725		break;
3726	}
3727	return;
3728}
3729
3730
3731/*
3732 * Enable PCI Wake On Lan capability
3733 */
3734static void
3735em_enable_wakeup(if_ctx_t ctx)
3736{
3737	struct adapter *adapter = iflib_get_softc(ctx);
3738	device_t dev = iflib_get_dev(ctx);
3739	if_t ifp = iflib_get_ifp(ctx);
3740	int error = 0;
3741	u32 pmc, ctrl, ctrl_ext, rctl;
3742	u16 status;
3743
3744	if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
3745		return;
3746
3747	/*
3748	 * Determine type of Wakeup: note that wol
3749	 * is set with all bits on by default.
3750	 */
3751	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
3752		adapter->wol &= ~E1000_WUFC_MAG;
3753
3754	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0)
3755		adapter->wol &= ~E1000_WUFC_EX;
3756
3757	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
3758		adapter->wol &= ~E1000_WUFC_MC;
3759	else {
3760		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3761		rctl |= E1000_RCTL_MPE;
3762		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3763	}
3764
3765	if (!(adapter->wol & (E1000_WUFC_EX | E1000_WUFC_MAG | E1000_WUFC_MC)))
3766		goto pme;
3767
3768	/* Advertise the wakeup capability */
3769	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
3770	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
3771	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
3772
3773	/* Keep the laser running on Fiber adapters */
3774	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3775	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
3776		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3777		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
3778		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
3779	}
3780
3781	if ((adapter->hw.mac.type == e1000_ich8lan) ||
3782	    (adapter->hw.mac.type == e1000_pchlan) ||
3783	    (adapter->hw.mac.type == e1000_ich9lan) ||
3784	    (adapter->hw.mac.type == e1000_ich10lan))
3785		e1000_suspend_workarounds_ich8lan(&adapter->hw);
3786
3787	if ( adapter->hw.mac.type >= e1000_pchlan) {
3788		error = em_enable_phy_wakeup(adapter);
3789		if (error)
3790			goto pme;
3791	} else {
3792		/* Enable wakeup by the MAC */
3793		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3794		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
3795	}
3796
3797	if (adapter->hw.phy.type == e1000_phy_igp_3)
3798		e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
3799
3800pme:
3801	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
3802	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3803	if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
3804		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3805	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
3806
3807	return;
3808}
3809
3810/*
3811 * WOL in the newer chipset interfaces (pchlan)
3812 * require thing to be copied into the phy
3813 */
3814static int
3815em_enable_phy_wakeup(struct adapter *adapter)
3816{
3817	struct e1000_hw *hw = &adapter->hw;
3818	u32 mreg, ret = 0;
3819	u16 preg;
3820
3821	/* copy MAC RARs to PHY RARs */
3822	e1000_copy_rx_addrs_to_phy_ich8lan(hw);
3823
3824	/* copy MAC MTA to PHY MTA */
3825	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
3826		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
3827		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
3828		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
3829		    (u16)((mreg >> 16) & 0xFFFF));
3830	}
3831
3832	/* configure PHY Rx Control register */
3833	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
3834	mreg = E1000_READ_REG(hw, E1000_RCTL);
3835	if (mreg & E1000_RCTL_UPE)
3836		preg |= BM_RCTL_UPE;
3837	if (mreg & E1000_RCTL_MPE)
3838		preg |= BM_RCTL_MPE;
3839	preg &= ~(BM_RCTL_MO_MASK);
3840	if (mreg & E1000_RCTL_MO_3)
3841		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
3842				<< BM_RCTL_MO_SHIFT);
3843	if (mreg & E1000_RCTL_BAM)
3844		preg |= BM_RCTL_BAM;
3845	if (mreg & E1000_RCTL_PMCF)
3846		preg |= BM_RCTL_PMCF;
3847	mreg = E1000_READ_REG(hw, E1000_CTRL);
3848	if (mreg & E1000_CTRL_RFCE)
3849		preg |= BM_RCTL_RFCE;
3850	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
3851
3852	/* enable PHY wakeup in MAC register */
3853	E1000_WRITE_REG(hw, E1000_WUC,
3854	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN | E1000_WUC_APME);
3855	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
3856
3857	/* configure and enable PHY wakeup in PHY registers */
3858	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
3859	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
3860
3861	/* activate PHY wakeup */
3862	ret = hw->phy.ops.acquire(hw);
3863	if (ret) {
3864		printf("Could not acquire PHY\n");
3865		return ret;
3866	}
3867	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
3868	                         (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
3869	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
3870	if (ret) {
3871		printf("Could not read PHY page 769\n");
3872		goto out;
3873	}
3874	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
3875	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
3876	if (ret)
3877		printf("Could not set PHY Host Wakeup bit\n");
3878out:
3879	hw->phy.ops.release(hw);
3880
3881	return ret;
3882}
3883
3884static void
3885em_if_led_func(if_ctx_t ctx, int onoff)
3886{
3887	struct adapter *adapter = iflib_get_softc(ctx);
3888
3889	if (onoff) {
3890		e1000_setup_led(&adapter->hw);
3891		e1000_led_on(&adapter->hw);
3892	} else {
3893		e1000_led_off(&adapter->hw);
3894		e1000_cleanup_led(&adapter->hw);
3895	}
3896}
3897
3898/*
3899 * Disable the L0S and L1 LINK states
3900 */
3901static void
3902em_disable_aspm(struct adapter *adapter)
3903{
3904	int base, reg;
3905	u16 link_cap,link_ctrl;
3906	device_t dev = adapter->dev;
3907
3908	switch (adapter->hw.mac.type) {
3909	case e1000_82573:
3910	case e1000_82574:
3911	case e1000_82583:
3912		break;
3913	default:
3914		return;
3915	}
3916	if (pci_find_cap(dev, PCIY_EXPRESS, &base) != 0)
3917		return;
3918	reg = base + PCIER_LINK_CAP;
3919	link_cap = pci_read_config(dev, reg, 2);
3920	if ((link_cap & PCIEM_LINK_CAP_ASPM) == 0)
3921		return;
3922	reg = base + PCIER_LINK_CTL;
3923	link_ctrl = pci_read_config(dev, reg, 2);
3924	link_ctrl &= ~PCIEM_LINK_CTL_ASPMC;
3925	pci_write_config(dev, reg, link_ctrl, 2);
3926	return;
3927}
3928
3929/**********************************************************************
3930 *
3931 *  Update the board statistics counters.
3932 *
3933 **********************************************************************/
3934static void
3935em_update_stats_counters(struct adapter *adapter)
3936{
3937	u64 prev_xoffrxc = adapter->stats.xoffrxc;
3938
3939	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
3940	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3941		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
3942		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
3943	}
3944	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
3945	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
3946	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
3947	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
3948
3949	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
3950	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
3951	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
3952	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
3953	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
3954	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
3955	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
3956	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
3957	/*
3958	 ** For watchdog management we need to know if we have been
3959	 ** paused during the last interval, so capture that here.
3960	*/
3961	if (adapter->stats.xoffrxc != prev_xoffrxc)
3962		adapter->shared->isc_pause_frames = 1;
3963	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
3964	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
3965	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
3966	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
3967	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
3968	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
3969	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
3970	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
3971	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
3972	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
3973	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
3974	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
3975
3976	/* For the 64-bit byte counters the low dword must be read first. */
3977	/* Both registers clear on the read of the high dword */
3978
3979	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
3980	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
3981	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
3982	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
3983
3984	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
3985	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
3986	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
3987	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
3988	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
3989
3990	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
3991	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
3992
3993	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
3994	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
3995	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
3996	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
3997	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
3998	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
3999	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4000	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4001	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4002	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
4003
4004	/* Interrupt Counts */
4005
4006	adapter->stats.iac += E1000_READ_REG(&adapter->hw, E1000_IAC);
4007	adapter->stats.icrxptc += E1000_READ_REG(&adapter->hw, E1000_ICRXPTC);
4008	adapter->stats.icrxatc += E1000_READ_REG(&adapter->hw, E1000_ICRXATC);
4009	adapter->stats.ictxptc += E1000_READ_REG(&adapter->hw, E1000_ICTXPTC);
4010	adapter->stats.ictxatc += E1000_READ_REG(&adapter->hw, E1000_ICTXATC);
4011	adapter->stats.ictxqec += E1000_READ_REG(&adapter->hw, E1000_ICTXQEC);
4012	adapter->stats.ictxqmtc += E1000_READ_REG(&adapter->hw, E1000_ICTXQMTC);
4013	adapter->stats.icrxdmtc += E1000_READ_REG(&adapter->hw, E1000_ICRXDMTC);
4014	adapter->stats.icrxoc += E1000_READ_REG(&adapter->hw, E1000_ICRXOC);
4015
4016	if (adapter->hw.mac.type >= e1000_82543) {
4017		adapter->stats.algnerrc +=
4018		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4019		adapter->stats.rxerrc +=
4020		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4021		adapter->stats.tncrs +=
4022		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4023		adapter->stats.cexterr +=
4024		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4025		adapter->stats.tsctc +=
4026		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4027		adapter->stats.tsctfc +=
4028		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
4029	}
4030}
4031
4032static uint64_t
4033em_if_get_counter(if_ctx_t ctx, ift_counter cnt)
4034{
4035	struct adapter *adapter = iflib_get_softc(ctx);
4036	struct ifnet *ifp = iflib_get_ifp(ctx);
4037
4038	switch (cnt) {
4039	case IFCOUNTER_COLLISIONS:
4040		return (adapter->stats.colc);
4041	case IFCOUNTER_IERRORS:
4042		return (adapter->dropped_pkts + adapter->stats.rxerrc +
4043		    adapter->stats.crcerrs + adapter->stats.algnerrc +
4044		    adapter->stats.ruc + adapter->stats.roc +
4045		    adapter->stats.mpc + adapter->stats.cexterr);
4046	case IFCOUNTER_OERRORS:
4047		return (adapter->stats.ecol + adapter->stats.latecol +
4048		    adapter->watchdog_events);
4049	default:
4050		return (if_get_counter_default(ifp, cnt));
4051	}
4052}
4053
4054/* em_if_needs_restart - Tell iflib when the driver needs to be reinitialized
4055 * @ctx: iflib context
4056 * @event: event code to check
4057 *
4058 * Defaults to returning true for unknown events.
4059 *
4060 * @returns true if iflib needs to reinit the interface
4061 */
4062static bool
4063em_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
4064{
4065	switch (event) {
4066	case IFLIB_RESTART_VLAN_CONFIG:
4067	default:
4068		return (true);
4069	}
4070}
4071
4072/* Export a single 32-bit register via a read-only sysctl. */
4073static int
4074em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4075{
4076	struct adapter *adapter;
4077	u_int val;
4078
4079	adapter = oidp->oid_arg1;
4080	val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4081	return (sysctl_handle_int(oidp, &val, 0, req));
4082}
4083
4084/*
4085 * Add sysctl variables, one per statistic, to the system.
4086 */
4087static void
4088em_add_hw_stats(struct adapter *adapter)
4089{
4090	device_t dev = iflib_get_dev(adapter->ctx);
4091	struct em_tx_queue *tx_que = adapter->tx_queues;
4092	struct em_rx_queue *rx_que = adapter->rx_queues;
4093
4094	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4095	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4096	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4097	struct e1000_hw_stats *stats = &adapter->stats;
4098
4099	struct sysctl_oid *stat_node, *queue_node, *int_node;
4100	struct sysctl_oid_list *stat_list, *queue_list, *int_list;
4101
4102#define QUEUE_NAME_LEN 32
4103	char namebuf[QUEUE_NAME_LEN];
4104
4105	/* Driver Statistics */
4106	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4107			CTLFLAG_RD, &adapter->dropped_pkts,
4108			"Driver dropped packets");
4109	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4110			CTLFLAG_RD, &adapter->link_irq,
4111			"Link MSI-X IRQ Handled");
4112	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4113			CTLFLAG_RD, &adapter->rx_overruns,
4114			"RX overruns");
4115	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4116			CTLFLAG_RD, &adapter->watchdog_events,
4117			"Watchdog timeouts");
4118	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4119	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
4120	    adapter, E1000_CTRL, em_sysctl_reg_handler, "IU",
4121	    "Device Control Register");
4122	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4123	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
4124	    adapter, E1000_RCTL, em_sysctl_reg_handler, "IU",
4125	    "Receiver Control Register");
4126	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4127			CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4128			"Flow Control High Watermark");
4129	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4130			CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4131			"Flow Control Low Watermark");
4132
4133	for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
4134		struct tx_ring *txr = &tx_que->txr;
4135		snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
4136		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4137		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name");
4138		queue_list = SYSCTL_CHILDREN(queue_node);
4139
4140		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4141		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
4142		    E1000_TDH(txr->me), em_sysctl_reg_handler, "IU",
4143		    "Transmit Descriptor Head");
4144		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4145		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
4146		    E1000_TDT(txr->me), em_sysctl_reg_handler, "IU",
4147		    "Transmit Descriptor Tail");
4148		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
4149				CTLFLAG_RD, &txr->tx_irq,
4150				"Queue MSI-X Transmit Interrupts");
4151	}
4152
4153	for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) {
4154		struct rx_ring *rxr = &rx_que->rxr;
4155		snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j);
4156		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4157		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name");
4158		queue_list = SYSCTL_CHILDREN(queue_node);
4159
4160		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4161		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
4162		    E1000_RDH(rxr->me), em_sysctl_reg_handler, "IU",
4163		    "Receive Descriptor Head");
4164		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4165		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
4166		    E1000_RDT(rxr->me), em_sysctl_reg_handler, "IU",
4167		    "Receive Descriptor Tail");
4168		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
4169				CTLFLAG_RD, &rxr->rx_irq,
4170				"Queue MSI-X Receive Interrupts");
4171	}
4172
4173	/* MAC stats get their own sub node */
4174
4175	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4176	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
4177	stat_list = SYSCTL_CHILDREN(stat_node);
4178
4179	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4180			CTLFLAG_RD, &stats->ecol,
4181			"Excessive collisions");
4182	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4183			CTLFLAG_RD, &stats->scc,
4184			"Single collisions");
4185	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4186			CTLFLAG_RD, &stats->mcc,
4187			"Multiple collisions");
4188	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4189			CTLFLAG_RD, &stats->latecol,
4190			"Late collisions");
4191	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4192			CTLFLAG_RD, &stats->colc,
4193			"Collision Count");
4194	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4195			CTLFLAG_RD, &adapter->stats.symerrs,
4196			"Symbol Errors");
4197	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4198			CTLFLAG_RD, &adapter->stats.sec,
4199			"Sequence Errors");
4200	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4201			CTLFLAG_RD, &adapter->stats.dc,
4202			"Defer Count");
4203	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4204			CTLFLAG_RD, &adapter->stats.mpc,
4205			"Missed Packets");
4206	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4207			CTLFLAG_RD, &adapter->stats.rnbc,
4208			"Receive No Buffers");
4209	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4210			CTLFLAG_RD, &adapter->stats.ruc,
4211			"Receive Undersize");
4212	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4213			CTLFLAG_RD, &adapter->stats.rfc,
4214			"Fragmented Packets Received ");
4215	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4216			CTLFLAG_RD, &adapter->stats.roc,
4217			"Oversized Packets Received");
4218	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4219			CTLFLAG_RD, &adapter->stats.rjc,
4220			"Recevied Jabber");
4221	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4222			CTLFLAG_RD, &adapter->stats.rxerrc,
4223			"Receive Errors");
4224	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4225			CTLFLAG_RD, &adapter->stats.crcerrs,
4226			"CRC errors");
4227	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4228			CTLFLAG_RD, &adapter->stats.algnerrc,
4229			"Alignment Errors");
4230	/* On 82575 these are collision counts */
4231	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4232			CTLFLAG_RD, &adapter->stats.cexterr,
4233			"Collision/Carrier extension errors");
4234	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4235			CTLFLAG_RD, &adapter->stats.xonrxc,
4236			"XON Received");
4237	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4238			CTLFLAG_RD, &adapter->stats.xontxc,
4239			"XON Transmitted");
4240	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4241			CTLFLAG_RD, &adapter->stats.xoffrxc,
4242			"XOFF Received");
4243	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4244			CTLFLAG_RD, &adapter->stats.xofftxc,
4245			"XOFF Transmitted");
4246
4247	/* Packet Reception Stats */
4248	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4249			CTLFLAG_RD, &adapter->stats.tpr,
4250			"Total Packets Received ");
4251	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4252			CTLFLAG_RD, &adapter->stats.gprc,
4253			"Good Packets Received");
4254	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4255			CTLFLAG_RD, &adapter->stats.bprc,
4256			"Broadcast Packets Received");
4257	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4258			CTLFLAG_RD, &adapter->stats.mprc,
4259			"Multicast Packets Received");
4260	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4261			CTLFLAG_RD, &adapter->stats.prc64,
4262			"64 byte frames received ");
4263	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4264			CTLFLAG_RD, &adapter->stats.prc127,
4265			"65-127 byte frames received");
4266	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4267			CTLFLAG_RD, &adapter->stats.prc255,
4268			"128-255 byte frames received");
4269	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4270			CTLFLAG_RD, &adapter->stats.prc511,
4271			"256-511 byte frames received");
4272	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4273			CTLFLAG_RD, &adapter->stats.prc1023,
4274			"512-1023 byte frames received");
4275	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4276			CTLFLAG_RD, &adapter->stats.prc1522,
4277			"1023-1522 byte frames received");
4278	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4279			CTLFLAG_RD, &adapter->stats.gorc,
4280			"Good Octets Received");
4281
4282	/* Packet Transmission Stats */
4283	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4284			CTLFLAG_RD, &adapter->stats.gotc,
4285			"Good Octets Transmitted");
4286	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4287			CTLFLAG_RD, &adapter->stats.tpt,
4288			"Total Packets Transmitted");
4289	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4290			CTLFLAG_RD, &adapter->stats.gptc,
4291			"Good Packets Transmitted");
4292	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4293			CTLFLAG_RD, &adapter->stats.bptc,
4294			"Broadcast Packets Transmitted");
4295	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4296			CTLFLAG_RD, &adapter->stats.mptc,
4297			"Multicast Packets Transmitted");
4298	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4299			CTLFLAG_RD, &adapter->stats.ptc64,
4300			"64 byte frames transmitted ");
4301	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4302			CTLFLAG_RD, &adapter->stats.ptc127,
4303			"65-127 byte frames transmitted");
4304	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4305			CTLFLAG_RD, &adapter->stats.ptc255,
4306			"128-255 byte frames transmitted");
4307	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4308			CTLFLAG_RD, &adapter->stats.ptc511,
4309			"256-511 byte frames transmitted");
4310	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4311			CTLFLAG_RD, &adapter->stats.ptc1023,
4312			"512-1023 byte frames transmitted");
4313	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4314			CTLFLAG_RD, &adapter->stats.ptc1522,
4315			"1024-1522 byte frames transmitted");
4316	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4317			CTLFLAG_RD, &adapter->stats.tsctc,
4318			"TSO Contexts Transmitted");
4319	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4320			CTLFLAG_RD, &adapter->stats.tsctfc,
4321			"TSO Contexts Failed");
4322
4323
4324	/* Interrupt Stats */
4325
4326	int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
4327	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics");
4328	int_list = SYSCTL_CHILDREN(int_node);
4329
4330	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
4331			CTLFLAG_RD, &adapter->stats.iac,
4332			"Interrupt Assertion Count");
4333
4334	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
4335			CTLFLAG_RD, &adapter->stats.icrxptc,
4336			"Interrupt Cause Rx Pkt Timer Expire Count");
4337
4338	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
4339			CTLFLAG_RD, &adapter->stats.icrxatc,
4340			"Interrupt Cause Rx Abs Timer Expire Count");
4341
4342	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
4343			CTLFLAG_RD, &adapter->stats.ictxptc,
4344			"Interrupt Cause Tx Pkt Timer Expire Count");
4345
4346	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
4347			CTLFLAG_RD, &adapter->stats.ictxatc,
4348			"Interrupt Cause Tx Abs Timer Expire Count");
4349
4350	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
4351			CTLFLAG_RD, &adapter->stats.ictxqec,
4352			"Interrupt Cause Tx Queue Empty Count");
4353
4354	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
4355			CTLFLAG_RD, &adapter->stats.ictxqmtc,
4356			"Interrupt Cause Tx Queue Min Thresh Count");
4357
4358	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
4359			CTLFLAG_RD, &adapter->stats.icrxdmtc,
4360			"Interrupt Cause Rx Desc Min Thresh Count");
4361
4362	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
4363			CTLFLAG_RD, &adapter->stats.icrxoc,
4364			"Interrupt Cause Receiver Overrun Count");
4365}
4366
4367/**********************************************************************
4368 *
4369 *  This routine provides a way to dump out the adapter eeprom,
4370 *  often a useful debug/service tool. This only dumps the first
4371 *  32 words, stuff that matters is in that extent.
4372 *
4373 **********************************************************************/
4374static int
4375em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4376{
4377	struct adapter *adapter = (struct adapter *)arg1;
4378	int error;
4379	int result;
4380
4381	result = -1;
4382	error = sysctl_handle_int(oidp, &result, 0, req);
4383
4384	if (error || !req->newptr)
4385		return (error);
4386
4387	/*
4388	 * This value will cause a hex dump of the
4389	 * first 32 16-bit words of the EEPROM to
4390	 * the screen.
4391	 */
4392	if (result == 1)
4393		em_print_nvm_info(adapter);
4394
4395	return (error);
4396}
4397
4398static void
4399em_print_nvm_info(struct adapter *adapter)
4400{
4401	u16 eeprom_data;
4402	int i, j, row = 0;
4403
4404	/* Its a bit crude, but it gets the job done */
4405	printf("\nInterface EEPROM Dump:\n");
4406	printf("Offset\n0x0000  ");
4407	for (i = 0, j = 0; i < 32; i++, j++) {
4408		if (j == 8) { /* Make the offset block */
4409			j = 0; ++row;
4410			printf("\n0x00%x0  ",row);
4411		}
4412		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4413		printf("%04x ", eeprom_data);
4414	}
4415	printf("\n");
4416}
4417
4418static int
4419em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4420{
4421	struct em_int_delay_info *info;
4422	struct adapter *adapter;
4423	u32 regval;
4424	int error, usecs, ticks;
4425
4426	info = (struct em_int_delay_info *) arg1;
4427	usecs = info->value;
4428	error = sysctl_handle_int(oidp, &usecs, 0, req);
4429	if (error != 0 || req->newptr == NULL)
4430		return (error);
4431	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4432		return (EINVAL);
4433	info->value = usecs;
4434	ticks = EM_USECS_TO_TICKS(usecs);
4435	if (info->offset == E1000_ITR)	/* units are 256ns here */
4436		ticks *= 4;
4437
4438	adapter = info->adapter;
4439
4440	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4441	regval = (regval & ~0xffff) | (ticks & 0xffff);
4442	/* Handle a few special cases. */
4443	switch (info->offset) {
4444	case E1000_RDTR:
4445		break;
4446	case E1000_TIDV:
4447		if (ticks == 0) {
4448			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4449			/* Don't write 0 into the TIDV register. */
4450			regval++;
4451		} else
4452			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4453		break;
4454	}
4455	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4456	return (0);
4457}
4458
4459static void
4460em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4461	const char *description, struct em_int_delay_info *info,
4462	int offset, int value)
4463{
4464	info->adapter = adapter;
4465	info->offset = offset;
4466	info->value = value;
4467	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4468	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4469	    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
4470	    info, 0, em_sysctl_int_delay, "I", description);
4471}
4472
4473/*
4474 * Set flow control using sysctl:
4475 * Flow control values:
4476 *      0 - off
4477 *      1 - rx pause
4478 *      2 - tx pause
4479 *      3 - full
4480 */
4481static int
4482em_set_flowcntl(SYSCTL_HANDLER_ARGS)
4483{
4484	int error;
4485	static int input = 3; /* default is full */
4486	struct adapter	*adapter = (struct adapter *) arg1;
4487
4488	error = sysctl_handle_int(oidp, &input, 0, req);
4489
4490	if ((error) || (req->newptr == NULL))
4491		return (error);
4492
4493	if (input == adapter->fc) /* no change? */
4494		return (error);
4495
4496	switch (input) {
4497	case e1000_fc_rx_pause:
4498	case e1000_fc_tx_pause:
4499	case e1000_fc_full:
4500	case e1000_fc_none:
4501		adapter->hw.fc.requested_mode = input;
4502		adapter->fc = input;
4503		break;
4504	default:
4505		/* Do nothing */
4506		return (error);
4507	}
4508
4509	adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
4510	e1000_force_mac_fc(&adapter->hw);
4511	return (error);
4512}
4513
4514/*
4515 * Manage Energy Efficient Ethernet:
4516 * Control values:
4517 *     0/1 - enabled/disabled
4518 */
4519static int
4520em_sysctl_eee(SYSCTL_HANDLER_ARGS)
4521{
4522	struct adapter *adapter = (struct adapter *) arg1;
4523	int error, value;
4524
4525	value = adapter->hw.dev_spec.ich8lan.eee_disable;
4526	error = sysctl_handle_int(oidp, &value, 0, req);
4527	if (error || req->newptr == NULL)
4528		return (error);
4529	adapter->hw.dev_spec.ich8lan.eee_disable = (value != 0);
4530	em_if_init(adapter->ctx);
4531
4532	return (0);
4533}
4534
4535static int
4536em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4537{
4538	struct adapter *adapter;
4539	int error;
4540	int result;
4541
4542	result = -1;
4543	error = sysctl_handle_int(oidp, &result, 0, req);
4544
4545	if (error || !req->newptr)
4546		return (error);
4547
4548	if (result == 1) {
4549		adapter = (struct adapter *) arg1;
4550		em_print_debug_info(adapter);
4551	}
4552
4553	return (error);
4554}
4555
4556static int
4557em_get_rs(SYSCTL_HANDLER_ARGS)
4558{
4559	struct adapter *adapter = (struct adapter *) arg1;
4560	int error;
4561	int result;
4562
4563	result = 0;
4564	error = sysctl_handle_int(oidp, &result, 0, req);
4565
4566	if (error || !req->newptr || result != 1)
4567		return (error);
4568	em_dump_rs(adapter);
4569
4570	return (error);
4571}
4572
4573static void
4574em_if_debug(if_ctx_t ctx)
4575{
4576	em_dump_rs(iflib_get_softc(ctx));
4577}
4578
4579/*
4580 * This routine is meant to be fluid, add whatever is
4581 * needed for debugging a problem.  -jfv
4582 */
4583static void
4584em_print_debug_info(struct adapter *adapter)
4585{
4586	device_t dev = iflib_get_dev(adapter->ctx);
4587	struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
4588	struct tx_ring *txr = &adapter->tx_queues->txr;
4589	struct rx_ring *rxr = &adapter->rx_queues->rxr;
4590
4591	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
4592		printf("Interface is RUNNING ");
4593	else
4594		printf("Interface is NOT RUNNING\n");
4595
4596	if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
4597		printf("and INACTIVE\n");
4598	else
4599		printf("and ACTIVE\n");
4600
4601	for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
4602		device_printf(dev, "TX Queue %d ------\n", i);
4603		device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
4604			E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
4605			E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
4606
4607	}
4608	for (int j=0; j < adapter->rx_num_queues; j++, rxr++) {
4609		device_printf(dev, "RX Queue %d ------\n", j);
4610		device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
4611			E1000_READ_REG(&adapter->hw, E1000_RDH(j)),
4612			E1000_READ_REG(&adapter->hw, E1000_RDT(j)));
4613	}
4614}
4615
4616/*
4617 * 82574 only:
4618 * Write a new value to the EEPROM increasing the number of MSI-X
4619 * vectors from 3 to 5, for proper multiqueue support.
4620 */
4621static void
4622em_enable_vectors_82574(if_ctx_t ctx)
4623{
4624	struct adapter *adapter = iflib_get_softc(ctx);
4625	struct e1000_hw *hw = &adapter->hw;
4626	device_t dev = iflib_get_dev(ctx);
4627	u16 edata;
4628
4629	e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
4630	if (bootverbose)
4631		device_printf(dev, "EM_NVM_PCIE_CTRL = %#06x\n", edata);
4632	if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) {
4633		device_printf(dev, "Writing to eeprom: increasing "
4634		    "reported MSI-X vectors from 3 to 5...\n");
4635		edata &= ~(EM_NVM_MSIX_N_MASK);
4636		edata |= 4 << EM_NVM_MSIX_N_SHIFT;
4637		e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
4638		e1000_update_nvm_checksum(hw);
4639		device_printf(dev, "Writing to eeprom: done\n");
4640	}
4641}
4642