/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "iavf.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IAVF_DRIVER_VERSION_MAJOR	2
#define IAVF_DRIVER_VERSION_MINOR	0
#define IAVF_DRIVER_VERSION_BUILD	0

#define IAVF_DRIVER_VERSION_STRING			\
    __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IAVF_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k"

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t iavf_vendor_info_array[] =
{
	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"),
	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"),
	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void	 *iavf_register(device_t dev);
static int	 iavf_if_attach_pre(if_ctx_t ctx);
static int	 iavf_if_attach_post(if_ctx_t ctx);
static int	 iavf_if_detach(if_ctx_t ctx);
static int	 iavf_if_shutdown(if_ctx_t ctx);
static int	 iavf_if_suspend(if_ctx_t ctx);
static int	 iavf_if_resume(if_ctx_t ctx);
static int	 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 iavf_if_enable_intr(if_ctx_t ctx);
static void	 iavf_if_disable_intr(if_ctx_t ctx);
static int	 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 iavf_if_queues_free(if_ctx_t ctx);
static void	 iavf_if_update_admin_status(if_ctx_t ctx);
static void	 iavf_if_multi_set(if_ctx_t ctx);
static int	 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 iavf_if_media_change(if_ctx_t ctx);
static int	 iavf_if_promisc_set(if_ctx_t ctx, int flags);
static void	 iavf_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void	 iavf_if_stop(if_ctx_t ctx);
static bool	 iavf_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);

static int	iavf_allocate_pci_resources(struct iavf_sc *);
static int	iavf_reset_complete(struct i40e_hw *);
static int	iavf_setup_vc(struct iavf_sc *);
static int	iavf_reset(struct iavf_sc *);
static int	iavf_vf_config(struct iavf_sc *);
static void	iavf_init_filters(struct iavf_sc *);
static void	iavf_free_pci_resources(struct iavf_sc *);
static void	iavf_free_filters(struct iavf_sc *);
static void	iavf_setup_interface(device_t, struct iavf_sc *);
static void	iavf_add_device_sysctls(struct iavf_sc *);
static void	iavf_enable_adminq_irq(struct i40e_hw *);
static void	iavf_disable_adminq_irq(struct i40e_hw *);
static void	iavf_enable_queue_irq(struct i40e_hw *, int);
static void	iavf_disable_queue_irq(struct i40e_hw *, int);
static void	iavf_config_rss(struct iavf_sc *);
static void	iavf_stop(struct iavf_sc *);

static int	iavf_add_mac_filter(struct iavf_sc *, u8 *, u16);
static int	iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
static int	iavf_msix_que(void *);
static int	iavf_msix_adminq(void *);
//static void	iavf_del_multi(struct iavf_sc *sc);
static void	iavf_init_multi(struct iavf_sc *sc);
static void	iavf_configure_itr(struct iavf_sc *sc);

static int	iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);

static void	iavf_save_tunables(struct iavf_sc *);
static enum i40e_status_code
    iavf_process_adminq(struct iavf_sc *, u16 *);
static int	iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
static int	iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t iavf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, iavf_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t iavf_driver = {
	"iavf", iavf_methods, sizeof(struct iavf_sc),
};

devclass_t iavf_devclass;
DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0);
MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision",
    pci, iavf, iavf_vendor_info_array,
    nitems(iavf_vendor_info_array) - 1);
MODULE_VERSION(iavf, 1);

MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");

static device_method_t iavf_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
	DEVMETHOD(ifdi_detach, iavf_if_detach),
	DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
	DEVMETHOD(ifdi_suspend, iavf_if_suspend),
	DEVMETHOD(ifdi_resume, iavf_if_resume),
	DEVMETHOD(ifdi_init, iavf_if_init),
	DEVMETHOD(ifdi_stop, iavf_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
	DEVMETHOD(ifdi_media_status, iavf_if_media_status),
	DEVMETHOD(ifdi_media_change, iavf_if_media_change),
	DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
	DEVMETHOD(ifdi_timer, iavf_if_timer),
	DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, iavf_if_needs_restart),
	DEVMETHOD_END
};

static driver_t iavf_if_driver = {
	"iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "iavf driver parameters");

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int iavf_enable_head_writeback = 0;
TUNABLE_INT("hw.iavf.enable_head_writeback",
    &iavf_enable_head_writeback);
SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &iavf_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int iavf_core_debug_mask = 0;
TUNABLE_INT("hw.iavf.core_debug_mask",
    &iavf_core_debug_mask);
SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &iavf_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int iavf_shared_debug_mask = 0;
TUNABLE_INT("hw.iavf.shared_debug_mask",
    &iavf_shared_debug_mask);
SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &iavf_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

int iavf_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.iavf.rx_itr", &iavf_rx_itr);
SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &iavf_rx_itr, 0, "RX Interrupt Rate");

int iavf_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.iavf.tx_itr", &iavf_tx_itr);
SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &iavf_tx_itr, 0, "TX Interrupt Rate");
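
/*
 * All of the tunables above are CTLFLAG_RDTUN, so they can be set at boot
 * time from /boot/loader.conf; e.g., with a purely illustrative value:
 *
 *   hw.iavf.rx_itr=125
 */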

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx iavf_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = iavf_vendor_info_array,
	.isc_driver_version = IAVF_DRIVER_VERSION_STRING,
	.isc_driver = &iavf_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t iavf_sctx = &iavf_sctx_init;

/*** Functions ***/
static void *
iavf_register(device_t dev)
{
	return (iavf_sctx);
}

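/*
 * Map PCI BAR0 and save off the PCI identification and register access
 * information used by the rest of the driver and the shared code.
 */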
static int
iavf_allocate_pci_resources(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(sc->vsi.ctx);
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	sc->osdep.mem_bus_space_tag =
		rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
		rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
	sc->osdep.dev = dev;

	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
	sc->hw.back = &sc->osdep;

	return (0);
}

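/*
 * iflib attach_pre handler: maps PCI resources, brings up communication
 * with the PF, requests this VF's configuration, and fills in the iflib
 * softc context parameters.
 */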
static int
iavf_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct iavf_sc *sc;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	if_softc_ctx_t scctx;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);

	vsi = &sc->vsi;
	vsi->back = sc;
	sc->dev = dev;
	hw = &sc->hw;

	vsi->dev = dev;
	vsi->hw = &sc->hw;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = scctx = iflib_get_softc_ctx(ctx);

	iavf_save_tunables(sc);

	/* Do PCI setup - map BAR0, etc */
	if (iavf_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");

	/*
	 * XXX: This is called by init_shared_code in the PF driver,
	 * but the rest of that function does not support VFs.
	 */
	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	iavf_dbg_init(sc, "VF Device is ready for configuration\n");

	/* Sets up Admin Queue */
	error = iavf_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	iavf_dbg_init(sc, "PF API version verified\n");

	/* Need API version before sending reset message */
	error = iavf_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	iavf_dbg_init(sc, "VF reset complete\n");

	/* Ask for VF config from PF */
	error = iavf_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	device_printf(dev,
	    "VSIs %d, QPs %d, MSI-X %d, RSS sizes: key %d lut %d\n",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->rss_key_size,
	    sc->vf_res->rss_lut_size);
	iavf_dbg_info(sc, "Capabilities=%b\n",
	    sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS);

	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		/* XXX: We only use the first VSI we find */
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}
	vsi->id = sc->vsi_res->vsi_id;

	iavf_dbg_init(sc, "Resource Acquisition complete\n");

	/* If no mac address was assigned just make a random one */
	if (!iavf_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Allocate filter lists */
	iavf_init_filters(sc);

	/* Fill out more iflib parameters */
	scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
	    sc->vsi_res->num_queue_pairs;
	if (vsi->enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;

	return (0);

err_res_buf:
	free(sc->vf_res, M_IAVF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	iavf_free_pci_resources(sc);
err_early:
	return (error);
}

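/*
 * iflib attach_post handler: finishes stack interface setup, initializes
 * statistics and sysctls, and enables the Admin Queue interrupt.
 */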
static int
iavf_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct iavf_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	int error = 0;

	INIT_DBG_DEV(dev, "begin");

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	vsi = &sc->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &sc->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup the stack interface */
	iavf_setup_interface(dev, sc);

	INIT_DBG_DEV(dev, "Interface setup complete");

	/* Initialize statistics & add sysctls */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	iavf_add_device_sysctls(sc);

	sc->init_state = IAVF_INIT_READY;
	atomic_store_rel_32(&sc->queues_enabled, 0);

	/* We want AQ enabled early for init */
	iavf_enable_adminq_irq(hw);

	INIT_DBG_DEV(dev, "end");

	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach()
 * -> This means that this isn't allowed to fail
 */
static int
iavf_if_detach(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum i40e_status_code status;

	INIT_DBG_DEV(dev, "begin");

	/* Remove all the media and link information */
	ifmedia_removeall(vsi->media);

	iavf_disable_adminq_irq(hw);
	status = i40e_shutdown_adminq(&sc->hw);
	if (status != I40E_SUCCESS) {
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));
	}

	free(sc->vf_res, M_IAVF);
	iavf_free_pci_resources(sc);
	iavf_free_filters(sc);

	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
iavf_if_shutdown(if_ctx_t ctx)
{
	return (0);
}

static int
iavf_if_suspend(if_ctx_t ctx)
{
	return (0);
}

static int
iavf_if_resume(if_ctx_t ctx)
{
	return (0);
}

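/*
 * Send a virtchnl op to the PF and, unless the device is detaching, sleep
 * until the matching response arrives or IAVF_AQ_TIMEOUT expires.
 */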
static int
iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
{
	int error = 0;
	if_ctx_t ctx = sc->vsi.ctx;

	error = ixl_vc_send_cmd(sc, op);
	if (error != 0) {
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
		return (error);
	}

	/* Don't wait for a response if the device is being detached. */
	if (!iflib_in_detach(ctx)) {
		iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
		error = sx_sleep(ixl_vc_get_op_chan(sc, op),
		    iflib_ctx_lock_get(ctx), PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);

		if (error == EWOULDBLOCK)
			device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
	}

	return (error);
}

static int
iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
{
	int error = 0;

	error = ixl_vc_send_cmd(sc, op);
	if (error != 0)
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);

	return (error);
}

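/*
 * Initialize all TX rings, then set the RX mbuf size and reset the tail
 * register for each RX ring.
 */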
static void
iavf_init_queues(struct ixl_vsi *vsi)
{
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct rx_ring *rxr;

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
		ixl_init_tx_ring(vsi, tx_que);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		rxr = &rx_que->rxr;

		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);

		wr32(vsi->hw, rxr->tail, 0);
	}
}

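/*
 * iflib init handler: re-initializes the Admin Queue if needed, reprograms
 * the MAC filter, configures the queues, ITR, RSS, and vector mappings,
 * and finally asks the PF to enable the queues.
 */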
void
iavf_if_init(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_hw *hw = &sc->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	int error = 0;

	INIT_DBG_IF(ifp, "begin");

	MPASS(sx_xlocked(iflib_ctx_lock_get(ctx)));

	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(sc->dev, "%s: VF reset failed\n",
		    __func__);
	}

	if (!i40e_check_asq_alive(hw)) {
		iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
		pci_enable_busmaster(sc->dev);
		i40e_shutdown_adminq(hw);
		i40e_init_adminq(hw);
	}

	/* Make sure queues are disabled */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);

	bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		error = iavf_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
	}

	error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
	if (!error || error == EEXIST)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the queues for operation */
	iavf_init_queues(vsi);

	/* Set initial ITR values */
	iavf_configure_itr(sc);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);

	/* Set up RSS */
	iavf_config_rss(sc);

	/* Map vectors */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);

	/* Init SW TX ring indices */
	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	/* Configure promiscuous mode */
	iavf_if_promisc_set(ctx, if_getflags(ifp));

	/* Enable queues */
	iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);

	sc->init_state = IAVF_RUNNING;
}

/*
 * iavf_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by retrying
 * the initial "API version" message several times, until the
 * PF responds or the retry limit is reached.
 */
static int
iavf_setup_vc(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = false;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
		if (error) {
			device_printf(dev, "%s: init_adminq failed: %d\n",
			    __func__, error);
			ret_error = 1;
			continue;
		}

		iavf_dbg_init(sc, "Initialized Admin Queue; starting"
		    " send_api_ver attempt %d", i + 1);

retry_send:
		/* Send VF's API version */
		error = iavf_send_api_ver(sc);
		if (error) {
			i40e_shutdown_adminq(hw);
			ret_error = 2;
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i + 1, error);
		}

		asq_retries = 0;
		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IAVF_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				device_printf(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more tries...\n",
				    IAVF_AQ_MAX_ERR - (i + 1));
				ret_error = 3;
				break;
			}
			i40e_msec_pause(10);
		}
		if (asq_retries > IAVF_AQ_MAX_ERR)
			continue;

		iavf_dbg_init(sc, "Sent API version message to PF");

		/* Verify that the VF accepts the PF's API version */
		error = iavf_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried = true;
				device_printf(dev,
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				goto retry_send;
			} else {
				device_printf(dev,
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
				ret_error = 4;
				break;
			}
		}
		if (error) {
			device_printf(dev,
			    "%s: Unable to verify API version,"
			    " error %s\n", __func__, i40e_stat_str(hw, error));
			ret_error = 5;
		}
		break;
	}

	if (ret_error >= 4)
		i40e_shutdown_adminq(hw);
	return (ret_error);
}

/*
 * iavf_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
iavf_vf_config(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

retry_config:
	error = iavf_send_vf_config_msg(sc);
	if (error) {
		device_printf(dev,
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);
		ret_error = 2;
	}

	asq_retries = 0;
	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IAVF_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
			ret_error = 3;
			goto fail;
		}
		i40e_msec_pause(10);
	}

	iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n",
	    retried + 1);

	if (!sc->vf_res) {
		bufsz = sizeof(struct virtchnl_vf_resource) +
		    (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
		sc->vf_res = malloc(bufsz, M_IAVF, M_NOWAIT);
		if (!sc->vf_res) {
			device_printf(dev,
			    "%s: Unable to allocate memory for VF configuration"
			    " message from PF on attempt %d\n", __func__, retried + 1);
			ret_error = 1;
			goto fail;
		}
	}

	/* Check for VF config response */
	error = iavf_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		if (!retried) {
			retried++;
			goto retry_config;
		}
		device_printf(dev,
		    "%s: iavf_get_vf_config() timed out waiting for a response\n",
		    __func__);
	}
	if (error) {
		device_printf(dev,
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);
		ret_error = 4;
	}
	goto done;

fail:
	free(sc->vf_res, M_IAVF);
done:
	return (ret_error);
}

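/*
 * Assign MSI-X vectors: vector 0 is used for the Admin Queue, and one
 * vector is allocated per RX queue; TX queues share the RX queue
 * interrupts in round-robin fashion.
 */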
static int
iavf_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue is vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    iavf_msix_adminq, sc, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/* Enable all interrupts */
static void
iavf_if_enable_intr(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;

	iavf_enable_intr(vsi);
}

/* Disable all interrupts */
static void
iavf_if_disable_intr(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;

	iavf_disable_intr(vsi);
}

static int
iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];

	iavf_enable_queue_irq(hw, rx_que->msix - 1);
	return (0);
}

static int
iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	iavf_enable_queue_irq(hw, tx_que->msix - 1);
	return (0);
}

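/*
 * Allocate the per-queue TX software structures and attach them to the TX
 * descriptor memory that iflib has already DMA-allocated.
 */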
static int
iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL1(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	iavf_if_queues_free(ctx);
	return (error);
}

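/*
 * Allocate the per-queue RX software structures and attach them to the RX
 * descriptor memory that iflib has already DMA-allocated.
 */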
static int
iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL1(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	iavf_if_queues_free(ctx);
	return (error);
}

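/*
 * Free the software queue structures allocated by the
 * iavf_if_{tx,rx}_queues_alloc() handlers.
 */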
static void
iavf_if_queues_free(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;

	if (!vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IAVF);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IAVF);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IAVF);
		vsi->rx_queues = NULL;
	}
}

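/*
 * Check the ARQ/ASQ length registers for latched error bits; acknowledge
 * any that are set and, on error, stop the VF and request a reset.
 */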
static int
iavf_check_aq_errors(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u32 reg, oldreg;
	u8 aq_error = false;

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	if (aq_error) {
		device_printf(dev, "WARNING: Stopping VF!\n");
		/*
		 * A VF reset might not be enough to fix a problem here;
		 * a PF reset could be required.
		 */
		sc->init_state = IAVF_RESET_REQUIRED;
		iavf_stop(sc);
		iavf_request_reset(sc);
	}

	return (aq_error ? EIO : 0);
}

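/*
 * Drain the Admin Queue receive ring, dispatching each virtchnl message
 * to iavf_vc_completion(), then re-enable the AQ interrupt cause.
 */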
static enum i40e_status_code
iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &sc->hw;
	struct virtchnl_msg *v_msg;
	int error = 0, loop = 0;
	u32 reg;

	error = iavf_check_aq_errors(sc);
	if (error)
		return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	v_msg = (struct virtchnl_msg *)&event.desc;

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		/*
		 * Also covers normal case when i40e_clean_arq_element()
		 * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK"
		 */
		if (status)
			break;
		iavf_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_VFINT_ICR0_ENA1);
	reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK;
	wr32(hw, I40E_VFINT_ICR0_ENA1, reg);

	return (status);
}

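/*
 * iflib admin task: processes pending Admin Queue messages and pushes any
 * link state change up to the stack.
 */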
static void
iavf_if_update_admin_status(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct i40e_hw *hw = &sc->hw;
	u16 pending;

	iavf_process_adminq(sc, &pending);
	iavf_update_link_status(sc);

	/*
	 * If there are still messages to process, reschedule.
	 * Otherwise, re-enable the Admin Queue interrupt.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		iavf_enable_adminq_irq(hw);
}

static u_int
iavf_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
{
	struct iavf_sc *sc = arg;
	int error;

	error = iavf_add_mac_filter(sc, (u8*)LLADDR(sdl), IXL_FILTER_MC);
	return (!error);
}

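/*
 * iflib multi_set handler: reprograms the multicast filter list, or falls
 * back to multicast promiscuous mode when the list is too long.
 */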
static void
iavf_if_multi_set(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);

	IOCTL_DEBUGOUT("iavf_if_multi_set: begin");

	if (__predict_false(if_llmaddr_count(iflib_get_ifp(ctx)) >=
	    MAX_MULTICAST_ADDR)) {
		/* Delete MC filters and enable multicast promisc instead */
		iavf_init_multi(sc);
		sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
		return;
	}

	/* If there aren't too many filters, delete existing MC filters */
	iavf_init_multi(sc);

	/* And (re-)install filters for all mcast addresses */
	if (if_foreach_llmaddr(iflib_get_ifp(ctx), iavf_mc_filter_apply, sc) >
	    0)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
}

static int
iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	return (0);
}

static void
iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
#ifdef IXL_DEBUG
	struct ifnet *ifp = iflib_get_ifp(ctx);
#endif
	struct iavf_sc *sc = iflib_get_softc(ctx);

	INIT_DBG_IF(ifp, "begin");

	iavf_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	/* Based on the link speed reported by the PF over the AdminQ, choose a
	 * PHY type to report. This isn't 100% correct since we don't really
	 * know the underlying PHY type of the PF, but at least we can report
	 * a valid link speed...
	 */
	switch (sc->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
	case VIRTCHNL_LINK_SPEED_25GB:
		ifmr->ifm_active |= IFM_25G_SR;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		ifmr->ifm_active |= IFM_40G_SR4;
		break;
	default:
		ifmr->ifm_active |= IFM_UNKNOWN;
		break;
	}

	INIT_DBG_IF(ifp, "end");
}

static int
iavf_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
iavf_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ifnet	*ifp = iflib_get_ifp(ctx);

	sc->promisc_flags = 0;

	if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
	if (flags & IFF_PROMISC)
		sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC;

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);

	return (0);
}

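/*
 * iflib timer callback: on queue 0 only, check whether the PF has
 * triggered a VF reset, then schedule the admin task and request
 * updated statistics.
 */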
static void
iavf_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct i40e_hw *hw = &sc->hw;
	u32 val;

	if (qid != 0)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
	if (val != VIRTCHNL_VFR_VFACTIVE
	    && val != VIRTCHNL_VFR_COMPLETED) {
		iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
		return;
	}

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);

	/* Update stats */
	iavf_request_stats(sc);
}

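/*
 * iflib vlan_register handler: track the new VLAN tag in the software
 * filter list and ask the PF to add the hardware filter.
 */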
static void
iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct iavf_vlan_filter	*v;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	++vsi->num_vlans;
	v = malloc(sizeof(struct iavf_vlan_filter), M_IAVF, M_WAITOK | M_ZERO);
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
}

static void
iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct iavf_vlan_filter	*v;
	int			i = 0;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;
			++i;
			--vsi->num_vlans;
		}
	}
	if (i)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
}

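/*
 * iflib get_counter handler: report interface statistics from the
 * software counters maintained in the VSI.
 */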
static uint64_t
iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are impossible in full-duplex 10G/40G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

/* iavf_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
iavf_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		/* This case must return true if VLAN anti-spoof checks are
		 * enabled by the PF driver for the VF.
		 */
	default:
		return (true);
	}
}

static void
iavf_free_pci_resources(struct iavf_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
	device_t		dev = sc->dev;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/* Release all interrupts */
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);

early:
	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
}


/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
iavf_reset(struct iavf_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	int		error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IAVF_RESET_PENDING)
		iavf_request_reset(sc);

	i40e_msec_pause(100);
	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}
	pci_enable_busmaster(dev);

	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	iavf_enable_adminq_irq(hw);
	return (0);
}

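/*
 * Poll VFGEN_RSTAT until the PF reports the VF reset as finished;
 * returns EBUSY if it does not complete within the polling window.
 */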
static int
iavf_reset_complete(struct i40e_hw *hw)
{
	u32 reg;

	/* Wait up to ~10 seconds */
	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg == VIRTCHNL_VFR_COMPLETED))
			return (0);
		i40e_msec_pause(100);
	}

	return (EBUSY);
}

static void
iavf_setup_interface(device_t dev, struct iavf_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi->shared->isc_max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;
#if __FreeBSD_version >= 1100000
	if_setbaudrate(ifp, IF_Gbps(40));
#else
	if_initbaudrate(ifp, IF_Gbps(40));
#endif

	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
}

/*
** Get a new filter and add it to the mac filter list.
*/
static struct iavf_mac_filter *
iavf_get_mac_filter(struct iavf_sc *sc)
{
	struct iavf_mac_filter	*f;

	f = malloc(sizeof(struct iavf_mac_filter),
	    M_IAVF, M_NOWAIT | M_ZERO);
	if (f)
		SLIST_INSERT_HEAD(sc->mac_filters, f, next);

	return (f);
}

/*
** Find the filter with matching MAC address
*/
static struct iavf_mac_filter *
iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr)
{
	struct iavf_mac_filter	*f;
	bool match = FALSE;

	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (cmp_etheraddr(f->macaddr, macaddr)) {
			match = TRUE;
			break;
		}
	}

	if (!match)
		f = NULL;
	return (f);
}

/*
** Admin Queue interrupt handler
*/
static int
iavf_msix_adminq(void *arg)
{
	struct iavf_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;
	u32		reg, mask;
	bool		do_task = FALSE;

	++sc->admin_irq;

	reg = rd32(hw, I40E_VFINT_ICR01);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	/* Check on the cause */
	if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
	iavf_enable_adminq_irq(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

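/* Enable the Admin Queue interrupt and all RX queue interrupts */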
void
iavf_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	iavf_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		iavf_enable_queue_irq(hw, que->rxr.me);
}

void
iavf_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		iavf_disable_queue_irq(hw, que->rxr.me);
}

static void
iavf_disable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

static void
iavf_enable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

static void
iavf_enable_queue_irq(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

static void
iavf_disable_queue_irq(struct i40e_hw *hw, int id)
{
	wr32(hw, I40E_VFINT_DYN_CTLN1(id),
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	rd32(hw, I40E_VFGEN_RSTAT);
}

static void
iavf_configure_tx_itr(struct iavf_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_tx_queue	*que = vsi->tx_queues;

	vsi->tx_itr_setting = sc->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

static void
iavf_configure_rx_itr(struct iavf_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	vsi->rx_itr_setting = sc->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Get initial ITR values from tunable values.
 */
static void
iavf_configure_itr(struct iavf_sc *sc)
{
	iavf_configure_tx_itr(sc);
	iavf_configure_rx_itr(sc);
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
iavf_set_queue_rx_itr(struct ixl_rx_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	/* Update the hardware if needed */
	if (rxr->itr != vsi->rx_itr_setting) {
		rxr->itr = vsi->rx_itr_setting;
		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
		    que->rxr.me), rxr->itr);
	}
}

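/*
 * MSI-X RX queue interrupt handler: updates the queue's ITR and defers
 * the actual RX processing to the iflib queue task.
 */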
static int
iavf_msix_que(void *arg)
{
	struct ixl_rx_queue *rx_que = arg;

	++rx_que->irqs;

	iavf_set_queue_rx_itr(rx_que);
	// iavf_set_queue_tx_itr(que);

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset the multicast filter
 *  state to a fresh one.
 *
 **********************************************************************/
static void
iavf_init_multi(struct iavf_sc *sc)
{
	struct iavf_mac_filter *f;
	int mcnt = 0;

	/* First clear any multicast filters */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}
	}
	if (mcnt > 0)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
}

/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
void
iavf_update_link_status(struct iavf_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	u64 baudrate;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_vc_speed_to_value(sc->link_speed);
			iavf_dbg_info(sc, "baudrate: %lu\n", baudrate);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
		}
	}
}

/*********************************************************************
 *
 *  This routine stops all traffic on the adapter by disabling
 *  interrupts and requesting that the PF disable the VF's queues.
 *
 **********************************************************************/
1873
1874static void
1875iavf_stop(struct iavf_sc *sc)
1876{
1877	struct ifnet *ifp;
1878
1879	ifp = sc->vsi.ifp;
1880
1881	iavf_disable_intr(&sc->vsi);
1882
1883	if (atomic_load_acq_32(&sc->queues_enabled))
1884		iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
1885}
1886
1887static void
1888iavf_if_stop(if_ctx_t ctx)
1889{
1890	struct iavf_sc *sc = iflib_get_softc(ctx);
1891
1892	iavf_stop(sc);
1893}
1894
static void
iavf_config_rss_reg(struct iavf_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	struct ixl_vsi	*vsi = &sc->vsi;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
	u32		rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
	u32		rss_hash_config;
#endif

	/* Don't set up RSS if using a single queue */
	if (vsi->num_rx_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif

	/* Fill out hash function seed */
	for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with the max number of queues in round-robin fashion */
	for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
		if (j == vsi->num_rx_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_rx_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_rx_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}

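/*
 * Ask the PF to configure RSS on the VF's behalf; used when the PF
 * exposes the VIRTCHNL_VF_OFFLOAD_RSS_PF capability. The key, hash
 * types, and lookup table are sent as separate virtchnl messages.
 */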
static void
iavf_config_rss_pf(struct iavf_sc *sc)
{
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT);
}

/*
** iavf_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
iavf_config_rss(struct iavf_sc *sc)
{
	if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
		iavf_dbg_info(sc, "Setting up RSS using VF registers...");
		iavf_config_rss_reg(sc);
	} else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		iavf_dbg_info(sc, "Setting up RSS using messages to PF...");
		iavf_config_rss_pf(sc);
	} else
		device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
}

/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags)
{
	struct iavf_mac_filter	*f;

	/* Does one already exist? */
	f = iavf_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}

	/* If not, get a new empty filter */
	f = iavf_get_mac_filter(sc);
	if (f == NULL) {
		device_printf(sc->dev, "%s: no filters available!\n",
		    __func__);
		return (ENOMEM);
	}

	iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n",
	    MAC_FORMAT_ARGS(macaddr));

	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;
	return (0);
}

/*
** Marks a MAC filter for deletion.
*/
static int
iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
{
	struct iavf_mac_filter	*f;

	f = iavf_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;
	return (0);
}

/*
 * Re-uses sysctl names from the PF driver.
 */
static void
iavf_add_device_sysctls(struct iavf_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	struct sysctl_oid *debug_node;
	struct sysctl_oid_list *debug_list;

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "current_speed",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "tx_itr",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, iavf_sysctl_tx_itr, "I",
	    "Immediately set TX ITR value for all queues");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "rx_itr",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, iavf_sysctl_rx_itr, "I",
	    "Immediately set RX ITR value for all queues");

	/* Add sysctls meant to print debug information, but don't list them
	 * in "sysctl -a" output. */
	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
	    NULL, "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(debug_node);

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
	    &sc->hw.debug_mask, 0, "Shared code debug message level");

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
	    &sc->dbg_mask, 0, "Non-shared code debug message level");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "filter_list",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_vf_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    sc, 0, iavf_sysctl_vf_reset, "I", "Request a VF reset from PF");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_vflr_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    sc, 0, iavf_sysctl_vflr_reset, "I", "Request a VFLR reset from HW");

	/* Add stats sysctls */
	ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
	ixl_vsi_add_queues_stats(vsi, ctx);
}

static void
iavf_init_filters(struct iavf_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct mac_list),
	    M_IAVF, M_WAITOK | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct vlan_list),
	    M_IAVF, M_WAITOK | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
}

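/*
 * Free every entry on the MAC and VLAN filter lists, then free the
 * list heads allocated in iavf_init_filters().
 */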
static void
iavf_free_filters(struct iavf_sc *sc)
{
	struct iavf_mac_filter *f;
	struct iavf_vlan_filter *v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_IAVF);
	}
	free(sc->mac_filters, M_IAVF);
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_IAVF);
	}
	free(sc->vlan_filters, M_IAVF);
}

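/*
 * Translate a virtchnl link speed enum into a human-readable string.
 * Returns "Unknown" for speeds the table does not cover.
 */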
char *
iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
{
	int index;

	char *speeds[] = {
		"Unknown",
		"100 Mbps",
		"1 Gbps",
		"10 Gbps",
		"40 Gbps",
		"20 Gbps",
		"25 Gbps",
	};

	switch (link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		index = 1;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		index = 2;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		index = 3;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		index = 4;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		index = 5;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		index = 6;
		break;
	case VIRTCHNL_LINK_SPEED_UNKNOWN:
	default:
		index = 0;
		break;
	}

	return (speeds[index]);
}

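/*
 * Sysctl handler that reports the current link speed as a string,
 * readable via e.g. "sysctl dev.iavf.0.current_speed" (assuming
 * unit 0).
 */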
static int
iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	int error = 0;

	error = sysctl_handle_string(oidp,
	    iavf_vc_speed_to_string(sc->link_speed),
	    8, req);
	return (error);
}

/*
 * Sanity check and save off tunable values.
 */
static void
iavf_save_tunables(struct iavf_sc *sc)
{
	device_t dev = sc->dev;

	/* Save tunable information */
	sc->dbg_mask = iavf_core_debug_mask;
	sc->hw.debug_mask = iavf_shared_debug_mask;
	sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);

	if (iavf_tx_itr < 0 || iavf_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    iavf_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		sc->tx_itr = IXL_ITR_4K;
	} else
		sc->tx_itr = iavf_tx_itr;

	if (iavf_rx_itr < 0 || iavf_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    iavf_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		sc->rx_itr = IXL_ITR_8K;
	} else
		sc->rx_itr = iavf_rx_itr;
}

/*
 * Used to set the Tx ITR value for all of the VF's queues.
 * Writes to the ITR registers immediately.
 */
static int
iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	device_t dev = sc->dev;
	int requested_tx_itr;
	int error = 0;

	requested_tx_itr = sc->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	sc->tx_itr = requested_tx_itr;
	iavf_configure_tx_itr(sc);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the VF's queues.
 * Writes to the ITR registers immediately.
 */
static int
iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	device_t dev = sc->dev;
	int requested_rx_itr;
	int error = 0;

	requested_rx_itr = sc->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	sc->rx_itr = requested_rx_itr;
	iavf_configure_rx_itr(sc);

	return (error);
}

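/*
 * Sysctl handler that dumps the software MAC and VLAN filter lists,
 * including each filter's flag bits, into a human-readable buffer.
 */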
static int
iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	struct iavf_mac_filter *f;
	struct iavf_vlan_filter *v;
	device_t dev = sc->dev;
	int ftl_len, ftl_counter = 0, error = 0;
	struct sbuf *buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (buf == NULL) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_printf(buf, "\n");

	/* Print MAC filters */
	sbuf_printf(buf, "MAC Filters:\n");
	ftl_len = 0;
	SLIST_FOREACH(f, sc->mac_filters, next)
		ftl_len++;
	if (ftl_len < 1)
		sbuf_printf(buf, "(none)\n");
	else {
		SLIST_FOREACH(f, sc->mac_filters, next) {
			sbuf_printf(buf,
			    MAC_FORMAT ", flags %#06x\n",
			    MAC_FORMAT_ARGS(f->macaddr), f->flags);
		}
	}

	/* Print VLAN filters */
	sbuf_printf(buf, "VLAN Filters:\n");
	ftl_len = 0;
	SLIST_FOREACH(v, sc->vlan_filters, next)
		ftl_len++;
	if (ftl_len < 1)
		sbuf_printf(buf, "(none)");
	else {
		SLIST_FOREACH(v, sc->vlan_filters, next) {
			sbuf_printf(buf,
			    "%d, flags %#06x",
			    v->vlan, v->flags);
			/* don't print '\n' for last entry */
			if (++ftl_counter != ftl_len)
				sbuf_printf(buf, "\n");
		}
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}

/*
 * Print out the mapping of TX and RX queue indexes to MSI-X vectors.
 */
static int
iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	struct ixl_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	struct ixl_rx_queue *rx_que;
	struct ixl_tx_queue *tx_que;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (buf == NULL) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

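/*
 * Sysctl handler that requests a VF reset from the PF and, if the
 * interface was running, asks iflib to reinitialize it; triggered by
 * e.g. "sysctl dev.iavf.0.debug.do_vf_reset=1" (assuming unit 0).
 */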
#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
static int
iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	int do_reset = 0, error = 0;

	error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (do_reset == 1) {
		iavf_reset(sc);
		if (CTX_ACTIVE(sc->vsi.ctx))
			iflib_request_reset(sc->vsi.ctx);
	}

	return (error);
}

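/*
 * Sysctl handler that resets the VF via a PCIe Function Level Reset
 * instead of a virtchnl request; triggered by e.g.
 * "sysctl dev.iavf.0.debug.do_vflr_reset=1" (assuming unit 0).
 */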
static int
iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	device_t dev = sc->dev;
	int do_reset = 0, error = 0;

	error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (do_reset == 1) {
		if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
			device_printf(dev, "PCIe FLR failed\n");
			error = EIO;
		} else if (CTX_ACTIVE(sc->vsi.ctx))
			iflib_request_reset(sc->vsi.ctx);
	}

	return (error);
}
#undef CTX_ACTIVE
