162dadd65SYuri Pankov /*
2ca5345b6SSebastien Roy  * Copyright (C) 2007-2014 VMware, Inc. All rights reserved.
362dadd65SYuri Pankov  *
462dadd65SYuri Pankov  * The contents of this file are subject to the terms of the Common
562dadd65SYuri Pankov  * Development and Distribution License (the "License") version 1.0
662dadd65SYuri Pankov  * and no later version.  You may not use this file except in
762dadd65SYuri Pankov  * compliance with the License.
862dadd65SYuri Pankov  *
962dadd65SYuri Pankov  * You can obtain a copy of the License at
1062dadd65SYuri Pankov  *         http://www.opensource.org/licenses/cddl1.php
1162dadd65SYuri Pankov  *
1262dadd65SYuri Pankov  * See the License for the specific language governing permissions
1362dadd65SYuri Pankov  * and limitations under the License.
1462dadd65SYuri Pankov  */
1562dadd65SYuri Pankov 
1662dadd65SYuri Pankov /*
176849994eSSebastien Roy  * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
1862dadd65SYuri Pankov  */
1962dadd65SYuri Pankov 
2062dadd65SYuri Pankov #include <vmxnet3.h>
2162dadd65SYuri Pankov 
/*
 * This driver is based on VMware's version 3227872, and contains additional
 * enhancements (see README.txt).
 */
#define	BUILD_NUMBER_NUMERIC	3227872

/*
 * If we run out of rxPool buffers, only allocate if the MTU is <= PAGESIZE
 * so that we don't have to incur the cost of allocating multiple contiguous
 * pages (very slow) in interrupt context.
 */
#define	VMXNET3_ALLOC_OK(dp)	((dp)->cur_mtu <= PAGESIZE)
346849994eSSebastien Roy 
3562dadd65SYuri Pankov /*
3662dadd65SYuri Pankov  * TODO:
3762dadd65SYuri Pankov  *    - Tx data ring
3862dadd65SYuri Pankov  *    - MAC_CAPAB_POLL support
3962dadd65SYuri Pankov  *    - Dynamic RX pool
4062dadd65SYuri Pankov  */
4162dadd65SYuri Pankov 
/* mac(9E) entry points implemented in this file. */
static int vmxnet3_getstat(void *, uint_t, uint64_t *);
static int vmxnet3_start(void *);
static void vmxnet3_stop(void *);
static int vmxnet3_setpromisc(void *, boolean_t);
static void vmxnet3_ioctl(void *arg, queue_t *wq, mblk_t *mp);
static int vmxnet3_multicst(void *, boolean_t, const uint8_t *);
static int vmxnet3_unicst(void *, const uint8_t *);
static boolean_t vmxnet3_getcapab(void *, mac_capab_t, void *);
static int vmxnet3_get_prop(void *, const char *, mac_prop_id_t, uint_t,
    void *);
static int vmxnet3_set_prop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static void vmxnet3_prop_info(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);

/*
 * Global debug tunable; VMXNET3_DEBUG() calls in this file carry a level
 * (1..3) — presumably messages at or below this level are emitted.
 */
int vmxnet3s_debug = 0;
5862dadd65SYuri Pankov 
/* MAC callbacks */
static mac_callbacks_t vmxnet3_mac_callbacks = {
	/* Flags advertise only the optional entry points provided below. */
	.mc_callbacks =	MC_GETCAPAB | MC_IOCTL | MC_SETPROP | MC_PROPINFO,
	.mc_getstat =	vmxnet3_getstat,
	.mc_start =	vmxnet3_start,
	.mc_stop =	vmxnet3_stop,
	.mc_setpromisc = vmxnet3_setpromisc,
	.mc_multicst =	vmxnet3_multicst,
	.mc_unicst =	vmxnet3_unicst,
	.mc_tx =	vmxnet3_tx,
	.mc_ioctl =	vmxnet3_ioctl,
	.mc_getcapab =	vmxnet3_getcapab,
	.mc_getprop =	vmxnet3_get_prop,
	.mc_setprop =	vmxnet3_set_prop,
	.mc_propinfo =	vmxnet3_prop_info
};
7562dadd65SYuri Pankov 
/* Tx DMA engine description */
static ddi_dma_attr_t vmxnet3_dma_attrs_tx = {
	.dma_attr_version =	DMA_ATTR_V0,
	/* Device can DMA from any 64-bit address, no alignment needed. */
	.dma_attr_addr_lo =	0x0000000000000000ull,
	.dma_attr_addr_hi =	0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max =	0xFFFFFFFFFFFFFFFFull,
	.dma_attr_align =	0x0000000000000001ull,
	.dma_attr_burstsizes =	0x0000000000000001ull,
	.dma_attr_minxfer =	0x00000001,
	/* 64KB-1 per transfer — presumably the TxDesc length limit. */
	.dma_attr_maxxfer =	0x000000000000FFFFull,
	.dma_attr_seg =		0xFFFFFFFFFFFFFFFFull,
	/* -1: unlimited scatter/gather entries per mapping. */
	.dma_attr_sgllen =	-1,
	.dma_attr_granular =	0x00000001,
	.dma_attr_flags =	0
};
9162dadd65SYuri Pankov 
9262dadd65SYuri Pankov /* --- */
9362dadd65SYuri Pankov 
9462dadd65SYuri Pankov /*
95ca5345b6SSebastien Roy  * Fetch the statistics of a vmxnet3 device.
9662dadd65SYuri Pankov  *
97ca5345b6SSebastien Roy  * Returns:
986849994eSSebastien Roy  *	0 on success, non-zero on failure.
9962dadd65SYuri Pankov  */
10062dadd65SYuri Pankov static int
vmxnet3_getstat(void * data,uint_t stat,uint64_t * val)10162dadd65SYuri Pankov vmxnet3_getstat(void *data, uint_t stat, uint64_t *val)
10262dadd65SYuri Pankov {
10362dadd65SYuri Pankov 	vmxnet3_softc_t *dp = data;
10462dadd65SYuri Pankov 	UPT1_TxStats *txStats;
10562dadd65SYuri Pankov 	UPT1_RxStats *rxStats;
10662dadd65SYuri Pankov 
10762dadd65SYuri Pankov 	VMXNET3_DEBUG(dp, 3, "getstat(%u)\n", stat);
10862dadd65SYuri Pankov 
10962dadd65SYuri Pankov 	if (!dp->devEnabled) {
1106849994eSSebastien Roy 		return (EBUSY);
11162dadd65SYuri Pankov 	}
11262dadd65SYuri Pankov 
11362dadd65SYuri Pankov 	txStats = &VMXNET3_TQDESC(dp)->stats;
11462dadd65SYuri Pankov 	rxStats = &VMXNET3_RQDESC(dp)->stats;
11562dadd65SYuri Pankov 
11662dadd65SYuri Pankov 	/*
11762dadd65SYuri Pankov 	 * First touch the related register
11862dadd65SYuri Pankov 	 */
11962dadd65SYuri Pankov 	switch (stat) {
12062dadd65SYuri Pankov 	case MAC_STAT_MULTIRCV:
12162dadd65SYuri Pankov 	case MAC_STAT_BRDCSTRCV:
12262dadd65SYuri Pankov 	case MAC_STAT_MULTIXMT:
12362dadd65SYuri Pankov 	case MAC_STAT_BRDCSTXMT:
12462dadd65SYuri Pankov 	case MAC_STAT_NORCVBUF:
12562dadd65SYuri Pankov 	case MAC_STAT_IERRORS:
12662dadd65SYuri Pankov 	case MAC_STAT_NOXMTBUF:
12762dadd65SYuri Pankov 	case MAC_STAT_OERRORS:
12862dadd65SYuri Pankov 	case MAC_STAT_RBYTES:
12962dadd65SYuri Pankov 	case MAC_STAT_IPACKETS:
13062dadd65SYuri Pankov 	case MAC_STAT_OBYTES:
13162dadd65SYuri Pankov 	case MAC_STAT_OPACKETS:
13262dadd65SYuri Pankov 		VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
13362dadd65SYuri Pankov 		break;
13462dadd65SYuri Pankov 	case MAC_STAT_IFSPEED:
13562dadd65SYuri Pankov 	case MAC_STAT_COLLISIONS:
13662dadd65SYuri Pankov 	case ETHER_STAT_LINK_DUPLEX:
13762dadd65SYuri Pankov 		/* nothing */
13862dadd65SYuri Pankov 		break;
13962dadd65SYuri Pankov 	default:
1406849994eSSebastien Roy 		return (ENOTSUP);
14162dadd65SYuri Pankov 	}
14262dadd65SYuri Pankov 
14362dadd65SYuri Pankov 	/*
14462dadd65SYuri Pankov 	 * Then fetch the corresponding stat
14562dadd65SYuri Pankov 	 */
14662dadd65SYuri Pankov 	switch (stat) {
14762dadd65SYuri Pankov 	case MAC_STAT_IFSPEED:
14862dadd65SYuri Pankov 		*val = dp->linkSpeed;
14962dadd65SYuri Pankov 		break;
15062dadd65SYuri Pankov 	case MAC_STAT_MULTIRCV:
15162dadd65SYuri Pankov 		*val = rxStats->mcastPktsRxOK;
15262dadd65SYuri Pankov 		break;
15362dadd65SYuri Pankov 	case MAC_STAT_BRDCSTRCV:
15462dadd65SYuri Pankov 		*val = rxStats->bcastPktsRxOK;
15562dadd65SYuri Pankov 		break;
15662dadd65SYuri Pankov 	case MAC_STAT_MULTIXMT:
15762dadd65SYuri Pankov 		*val = txStats->mcastPktsTxOK;
15862dadd65SYuri Pankov 		break;
15962dadd65SYuri Pankov 	case MAC_STAT_BRDCSTXMT:
16062dadd65SYuri Pankov 		*val = txStats->bcastPktsTxOK;
16162dadd65SYuri Pankov 		break;
16262dadd65SYuri Pankov 	case MAC_STAT_NORCVBUF:
16362dadd65SYuri Pankov 		*val = rxStats->pktsRxOutOfBuf + dp->rx_alloc_failed;
16462dadd65SYuri Pankov 		break;
16562dadd65SYuri Pankov 	case MAC_STAT_IERRORS:
16662dadd65SYuri Pankov 		*val = rxStats->pktsRxError;
16762dadd65SYuri Pankov 		break;
16862dadd65SYuri Pankov 	case MAC_STAT_NOXMTBUF:
16962dadd65SYuri Pankov 		*val = txStats->pktsTxDiscard + dp->tx_pullup_failed;
17062dadd65SYuri Pankov 		break;
17162dadd65SYuri Pankov 	case MAC_STAT_OERRORS:
17262dadd65SYuri Pankov 		*val = txStats->pktsTxError + dp->tx_error;
17362dadd65SYuri Pankov 		break;
17462dadd65SYuri Pankov 	case MAC_STAT_COLLISIONS:
17562dadd65SYuri Pankov 		*val = 0;
17662dadd65SYuri Pankov 		break;
17762dadd65SYuri Pankov 	case MAC_STAT_RBYTES:
17862dadd65SYuri Pankov 		*val = rxStats->ucastBytesRxOK + rxStats->mcastBytesRxOK +
17962dadd65SYuri Pankov 		    rxStats->bcastBytesRxOK;
18062dadd65SYuri Pankov 		break;
18162dadd65SYuri Pankov 	case MAC_STAT_IPACKETS:
18262dadd65SYuri Pankov 		*val = rxStats->ucastPktsRxOK + rxStats->mcastPktsRxOK +
18362dadd65SYuri Pankov 		    rxStats->bcastPktsRxOK;
18462dadd65SYuri Pankov 		break;
18562dadd65SYuri Pankov 	case MAC_STAT_OBYTES:
18662dadd65SYuri Pankov 		*val = txStats->ucastBytesTxOK + txStats->mcastBytesTxOK +
18762dadd65SYuri Pankov 		    txStats->bcastBytesTxOK;
18862dadd65SYuri Pankov 		break;
18962dadd65SYuri Pankov 	case MAC_STAT_OPACKETS:
19062dadd65SYuri Pankov 		*val = txStats->ucastPktsTxOK + txStats->mcastPktsTxOK +
19162dadd65SYuri Pankov 		    txStats->bcastPktsTxOK;
19262dadd65SYuri Pankov 		break;
19362dadd65SYuri Pankov 	case ETHER_STAT_LINK_DUPLEX:
19462dadd65SYuri Pankov 		*val = LINK_DUPLEX_FULL;
19562dadd65SYuri Pankov 		break;
19662dadd65SYuri Pankov 	default:
19762dadd65SYuri Pankov 		ASSERT(B_FALSE);
19862dadd65SYuri Pankov 	}
19962dadd65SYuri Pankov 
2006849994eSSebastien Roy 	return (0);
20162dadd65SYuri Pankov }
20262dadd65SYuri Pankov 
/*
 * Allocate and initialize the shared data structures of a vmxnet3 device.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_prepare_drivershared(vmxnet3_softc_t *dp)
{
	Vmxnet3_DriverShared *ds;
	size_t allocSize = sizeof (Vmxnet3_DriverShared);
	int err;

	/* DMA-visible shared area (the _1 suffix presumably selects
	 * byte alignment — confirm against the allocator). */
	if ((err = vmxnet3_alloc_dma_mem_1(dp, &dp->sharedData, allocSize,
	    B_TRUE)) != 0) {
		return (err);
	}
	ds = VMXNET3_DS(dp);
	(void) memset(ds, 0, allocSize);

	/* One tx and one rx queue descriptor, allocated back to back. */
	allocSize = sizeof (Vmxnet3_TxQueueDesc) + sizeof (Vmxnet3_RxQueueDesc);
	if ((err = vmxnet3_alloc_dma_mem_128(dp, &dp->queueDescs, allocSize,
	    B_TRUE)) != 0) {
		vmxnet3_free_dma_mem(&dp->sharedData);
		return (err);
	}
	(void) memset(dp->queueDescs.buf, 0, allocSize);

	ds->magic = VMXNET3_REV1_MAGIC;

	/* Take care of most of devRead */
	ds->devRead.misc.driverInfo.version = BUILD_NUMBER_NUMERIC;
#ifdef _LP64
	ds->devRead.misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_64;
#else
	ds->devRead.misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_32;
#endif
	ds->devRead.misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_SOLARIS;
	ds->devRead.misc.driverInfo.gos.gosVer = 10;
	ds->devRead.misc.driverInfo.vmxnet3RevSpt = 1;
	ds->devRead.misc.driverInfo.uptVerSpt = 1;

	ds->devRead.misc.uptFeatures = UPT1_F_RXCSUM;
	ds->devRead.misc.mtu = dp->cur_mtu;

	/* XXX: ds->devRead.misc.maxNumRxSG */
	ds->devRead.misc.numTxQueues = 1;
	ds->devRead.misc.numRxQueues = 1;
	ds->devRead.misc.queueDescPA = dp->queueDescs.bufPA;
	ds->devRead.misc.queueDescLen = allocSize;

	/* TxQueue and RxQueue information is filled in other functions */
	ds->devRead.intrConf.autoMask = (dp->intrMaskMode == VMXNET3_IMM_AUTO);
	ds->devRead.intrConf.numIntrs = 1;
	/* XXX: ds->intr.modLevels */
	ds->devRead.intrConf.eventIntrIdx = 0;

	/* Advertise the shared area's physical address to the device. */
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAL,
	    VMXNET3_ADDR_LO(dp->sharedData.bufPA));
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAH,
	    VMXNET3_ADDR_HI(dp->sharedData.bufPA));

	return (0);
}
26762dadd65SYuri Pankov 
/*
 * Destroy the shared data structures of a vmxnet3 device.
 */
static void
vmxnet3_destroy_drivershared(vmxnet3_softc_t *dp)
{
	/* Clear the shared-area address registers before freeing it. */
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAL, 0);
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAH, 0);

	vmxnet3_free_dma_mem(&dp->queueDescs);
	vmxnet3_free_dma_mem(&dp->sharedData);
}
28062dadd65SYuri Pankov 
28162dadd65SYuri Pankov /*
282ca5345b6SSebastien Roy  * Allocate and initialize the command ring of a queue.
28362dadd65SYuri Pankov  *
284ca5345b6SSebastien Roy  * Returns:
2856849994eSSebastien Roy  *	0 on success, non-zero on error.
28662dadd65SYuri Pankov  */
28762dadd65SYuri Pankov static int
vmxnet3_alloc_cmdring(vmxnet3_softc_t * dp,vmxnet3_cmdring_t * cmdRing)28862dadd65SYuri Pankov vmxnet3_alloc_cmdring(vmxnet3_softc_t *dp, vmxnet3_cmdring_t *cmdRing)
28962dadd65SYuri Pankov {
29062dadd65SYuri Pankov 	size_t ringSize = cmdRing->size * sizeof (Vmxnet3_TxDesc);
2916849994eSSebastien Roy 	int err;
29262dadd65SYuri Pankov 
2936849994eSSebastien Roy 	if ((err = vmxnet3_alloc_dma_mem_512(dp, &cmdRing->dma, ringSize,
2946849994eSSebastien Roy 	    B_TRUE)) != 0) {
2956849994eSSebastien Roy 		return (err);
29662dadd65SYuri Pankov 	}
29762dadd65SYuri Pankov 	(void) memset(cmdRing->dma.buf, 0, ringSize);
29862dadd65SYuri Pankov 	cmdRing->avail = cmdRing->size;
29962dadd65SYuri Pankov 	cmdRing->next2fill = 0;
30062dadd65SYuri Pankov 	cmdRing->gen = VMXNET3_INIT_GEN;
30162dadd65SYuri Pankov 
3026849994eSSebastien Roy 	return (0);
30362dadd65SYuri Pankov }
30462dadd65SYuri Pankov 
30562dadd65SYuri Pankov /*
306ca5345b6SSebastien Roy  * Allocate and initialize the completion ring of a queue.
30762dadd65SYuri Pankov  *
308ca5345b6SSebastien Roy  * Returns:
30962dadd65SYuri Pankov  *    DDI_SUCCESS or DDI_FAILURE.
31062dadd65SYuri Pankov  */
31162dadd65SYuri Pankov static int
vmxnet3_alloc_compring(vmxnet3_softc_t * dp,vmxnet3_compring_t * compRing)31262dadd65SYuri Pankov vmxnet3_alloc_compring(vmxnet3_softc_t *dp, vmxnet3_compring_t *compRing)
31362dadd65SYuri Pankov {
31462dadd65SYuri Pankov 	size_t ringSize = compRing->size * sizeof (Vmxnet3_TxCompDesc);
31562dadd65SYuri Pankov 
31662dadd65SYuri Pankov 	if (vmxnet3_alloc_dma_mem_512(dp, &compRing->dma, ringSize,
31762dadd65SYuri Pankov 	    B_TRUE) != DDI_SUCCESS) {
31862dadd65SYuri Pankov 		return (DDI_FAILURE);
31962dadd65SYuri Pankov 	}
32062dadd65SYuri Pankov 	(void) memset(compRing->dma.buf, 0, ringSize);
32162dadd65SYuri Pankov 	compRing->next2comp = 0;
32262dadd65SYuri Pankov 	compRing->gen = VMXNET3_INIT_GEN;
32362dadd65SYuri Pankov 
32462dadd65SYuri Pankov 	return (DDI_SUCCESS);
32562dadd65SYuri Pankov }
32662dadd65SYuri Pankov 
/*
 * Initialize the tx queue of a vmxnet3 device.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_prepare_txqueue(vmxnet3_softc_t *dp)
{
	Vmxnet3_TxQueueDesc *tqdesc = VMXNET3_TQDESC(dp);
	vmxnet3_txqueue_t *txq = &dp->txQueue;
	int err;

	/* Ring sizes must have the VMXNET3_RING_SIZE_MASK bits clear. */
	ASSERT(!(txq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!(txq->compRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!txq->cmdRing.dma.buf && !txq->compRing.dma.buf);

	if ((err = vmxnet3_alloc_cmdring(dp, &txq->cmdRing)) != 0) {
		goto error;
	}
	/* Publish the command ring; no tx data ring (see TODO above). */
	tqdesc->conf.txRingBasePA = txq->cmdRing.dma.bufPA;
	tqdesc->conf.txRingSize = txq->cmdRing.size;
	tqdesc->conf.dataRingBasePA = 0;
	tqdesc->conf.dataRingSize = 0;

	if ((err = vmxnet3_alloc_compring(dp, &txq->compRing)) != 0) {
		goto error_cmdring;
	}
	tqdesc->conf.compRingBasePA = txq->compRing.dma.bufPA;
	tqdesc->conf.compRingSize = txq->compRing.size;

	/* Per-descriptor driver metadata; KM_SLEEP cannot return NULL. */
	txq->metaRing = kmem_zalloc(txq->cmdRing.size *
	    sizeof (vmxnet3_metatx_t), KM_SLEEP);
	ASSERT(txq->metaRing);

	if ((err = vmxnet3_txqueue_init(dp, txq)) != 0) {
		goto error_mpring;
	}

	return (0);

	/* Unwind in reverse order of acquisition. */
error_mpring:
	kmem_free(txq->metaRing, txq->cmdRing.size * sizeof (vmxnet3_metatx_t));
	vmxnet3_free_dma_mem(&txq->compRing.dma);
error_cmdring:
	vmxnet3_free_dma_mem(&txq->cmdRing.dma);
error:
	return (err);
}
37662dadd65SYuri Pankov 
/*
 * Initialize the rx queue of a vmxnet3 device.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_prepare_rxqueue(vmxnet3_softc_t *dp)
{
	Vmxnet3_RxQueueDesc *rqdesc = VMXNET3_RQDESC(dp);
	vmxnet3_rxqueue_t *rxq = &dp->rxQueue;
	int err = 0;

	/* Ring sizes must have the VMXNET3_RING_SIZE_MASK bits clear. */
	ASSERT(!(rxq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!(rxq->compRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!rxq->cmdRing.dma.buf && !rxq->compRing.dma.buf);

	if ((err = vmxnet3_alloc_cmdring(dp, &rxq->cmdRing)) != 0) {
		goto error;
	}
	/* Only ring 0 is used; the second rx ring stays disabled. */
	rqdesc->conf.rxRingBasePA[0] = rxq->cmdRing.dma.bufPA;
	rqdesc->conf.rxRingSize[0] = rxq->cmdRing.size;
	rqdesc->conf.rxRingBasePA[1] = 0;
	rqdesc->conf.rxRingSize[1] = 0;

	if ((err = vmxnet3_alloc_compring(dp, &rxq->compRing)) != 0) {
		goto error_cmdring;
	}
	rqdesc->conf.compRingBasePA = rxq->compRing.dma.bufPA;
	rqdesc->conf.compRingSize = rxq->compRing.size;

	/* Per-descriptor buffer bookkeeping; KM_SLEEP cannot return NULL. */
	rxq->bufRing = kmem_zalloc(rxq->cmdRing.size *
	    sizeof (vmxnet3_bufdesc_t), KM_SLEEP);
	ASSERT(rxq->bufRing);

	if ((err = vmxnet3_rxqueue_init(dp, rxq)) != 0) {
		goto error_bufring;
	}

	return (0);

	/* Unwind in reverse order of acquisition. */
error_bufring:
	kmem_free(rxq->bufRing, rxq->cmdRing.size * sizeof (vmxnet3_bufdesc_t));
	vmxnet3_free_dma_mem(&rxq->compRing.dma);
error_cmdring:
	vmxnet3_free_dma_mem(&rxq->cmdRing.dma);
error:
	return (err);
}
42662dadd65SYuri Pankov 
42762dadd65SYuri Pankov /*
428ca5345b6SSebastien Roy  * Destroy the tx queue of a vmxnet3 device.
42962dadd65SYuri Pankov  */
43062dadd65SYuri Pankov static void
vmxnet3_destroy_txqueue(vmxnet3_softc_t * dp)43162dadd65SYuri Pankov vmxnet3_destroy_txqueue(vmxnet3_softc_t *dp)
43262dadd65SYuri Pankov {
43362dadd65SYuri Pankov 	vmxnet3_txqueue_t *txq = &dp->txQueue;
43462dadd65SYuri Pankov 
43562dadd65SYuri Pankov 	ASSERT(txq->metaRing);
43662dadd65SYuri Pankov 	ASSERT(txq->cmdRing.dma.buf && txq->compRing.dma.buf);
43762dadd65SYuri Pankov 
43862dadd65SYuri Pankov 	vmxnet3_txqueue_fini(dp, txq);
43962dadd65SYuri Pankov 
44062dadd65SYuri Pankov 	kmem_free(txq->metaRing, txq->cmdRing.size * sizeof (vmxnet3_metatx_t));
44162dadd65SYuri Pankov 
44262dadd65SYuri Pankov 	vmxnet3_free_dma_mem(&txq->cmdRing.dma);
44362dadd65SYuri Pankov 	vmxnet3_free_dma_mem(&txq->compRing.dma);
44462dadd65SYuri Pankov }
44562dadd65SYuri Pankov 
44662dadd65SYuri Pankov /*
447ca5345b6SSebastien Roy  * Destroy the rx queue of a vmxnet3 device.
44862dadd65SYuri Pankov  */
44962dadd65SYuri Pankov static void
vmxnet3_destroy_rxqueue(vmxnet3_softc_t * dp)45062dadd65SYuri Pankov vmxnet3_destroy_rxqueue(vmxnet3_softc_t *dp)
45162dadd65SYuri Pankov {
45262dadd65SYuri Pankov 	vmxnet3_rxqueue_t *rxq = &dp->rxQueue;
45362dadd65SYuri Pankov 
45462dadd65SYuri Pankov 	ASSERT(rxq->bufRing);
45562dadd65SYuri Pankov 	ASSERT(rxq->cmdRing.dma.buf && rxq->compRing.dma.buf);
45662dadd65SYuri Pankov 
45762dadd65SYuri Pankov 	vmxnet3_rxqueue_fini(dp, rxq);
45862dadd65SYuri Pankov 
45962dadd65SYuri Pankov 	kmem_free(rxq->bufRing, rxq->cmdRing.size * sizeof (vmxnet3_bufdesc_t));
46062dadd65SYuri Pankov 
46162dadd65SYuri Pankov 	vmxnet3_free_dma_mem(&rxq->cmdRing.dma);
46262dadd65SYuri Pankov 	vmxnet3_free_dma_mem(&rxq->compRing.dma);
46362dadd65SYuri Pankov }
46462dadd65SYuri Pankov 
46562dadd65SYuri Pankov /*
466ca5345b6SSebastien Roy  * Apply new RX filters settings to a vmxnet3 device.
46762dadd65SYuri Pankov  */
46862dadd65SYuri Pankov static void
vmxnet3_refresh_rxfilter(vmxnet3_softc_t * dp)46962dadd65SYuri Pankov vmxnet3_refresh_rxfilter(vmxnet3_softc_t *dp)
47062dadd65SYuri Pankov {
47162dadd65SYuri Pankov 	Vmxnet3_DriverShared *ds = VMXNET3_DS(dp);
47262dadd65SYuri Pankov 
47362dadd65SYuri Pankov 	ds->devRead.rxFilterConf.rxMode = dp->rxMode;
47562dadd65SYuri Pankov }
47662dadd65SYuri Pankov 
47762dadd65SYuri Pankov /*
478ca5345b6SSebastien Roy  * Fetch the link state of a vmxnet3 device.
47962dadd65SYuri Pankov  */
48062dadd65SYuri Pankov static void
vmxnet3_refresh_linkstate(vmxnet3_softc_t * dp)48162dadd65SYuri Pankov vmxnet3_refresh_linkstate(vmxnet3_softc_t *dp)
48262dadd65SYuri Pankov {
48362dadd65SYuri Pankov 	uint32_t ret32;
48462dadd65SYuri Pankov 
48562dadd65SYuri Pankov 	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
48662dadd65SYuri Pankov 	ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
48762dadd65SYuri Pankov 	if (ret32 & 1) {
48862dadd65SYuri Pankov 		dp->linkState = LINK_STATE_UP;
48962dadd65SYuri Pankov 		dp->linkSpeed = (ret32 >> 16) * 1000000ULL;
49062dadd65SYuri Pankov 	} else {
49162dadd65SYuri Pankov 		dp->linkState = LINK_STATE_DOWN;
49262dadd65SYuri Pankov 		dp->linkSpeed = 0;
49362dadd65SYuri Pankov 	}
49462dadd65SYuri Pankov }
49562dadd65SYuri Pankov 
49662dadd65SYuri Pankov /*
497ca5345b6SSebastien Roy  * Start a vmxnet3 device: allocate and initialize the shared data
498ca5345b6SSebastien Roy  * structures and send a start command to the device.
49962dadd65SYuri Pankov  *
500ca5345b6SSebastien Roy  * Returns:
5016849994eSSebastien Roy  *	0 on success, non-zero error on failure.
50262dadd65SYuri Pankov  */
50362dadd65SYuri Pankov static int
vmxnet3_start(void * data)50462dadd65SYuri Pankov vmxnet3_start(void *data)
50562dadd65SYuri Pankov {
50662dadd65SYuri Pankov 	vmxnet3_softc_t *dp = data;
50762dadd65SYuri Pankov 	Vmxnet3_TxQueueDesc *tqdesc;
50862dadd65SYuri Pankov 	Vmxnet3_RxQueueDesc *rqdesc;
50962dadd65SYuri Pankov 	int txQueueSize, rxQueueSize;
51062dadd65SYuri Pankov 	uint32_t ret32;
5116849994eSSebastien Roy 	int err, dmaerr;
51262dadd65SYuri Pankov 
51362dadd65SYuri Pankov 	VMXNET3_DEBUG(dp, 1, "start()\n");
51462dadd65SYuri Pankov 
51562dadd65SYuri Pankov 	/*
51662dadd65SYuri Pankov 	 * Allocate vmxnet3's shared data and advertise its PA
51762dadd65SYuri Pankov 	 */
5186849994eSSebastien Roy 	if ((err = vmxnet3_prepare_drivershared(dp)) != 0) {
5196849994eSSebastien Roy 		VMXNET3_WARN(dp, "vmxnet3_prepare_drivershared() failed: %d",
5206849994eSSebastien Roy 		    err);
52162dadd65SYuri Pankov 		goto error;
52262dadd65SYuri Pankov 	}
52362dadd65SYuri Pankov 	tqdesc = VMXNET3_TQDESC(dp);
52462dadd65SYuri Pankov 	rqdesc = VMXNET3_RQDESC(dp);
52562dadd65SYuri Pankov 
52662dadd65SYuri Pankov 	/*
52762dadd65SYuri Pankov 	 * Create and initialize the tx queue
52862dadd65SYuri Pankov 	 */
52962dadd65SYuri Pankov 	txQueueSize = vmxnet3_getprop(dp, "TxRingSize", 32, 4096,
53062dadd65SYuri Pankov 	    VMXNET3_DEF_TX_RING_SIZE);
53162dadd65SYuri Pankov 	if (!(txQueueSize & VMXNET3_RING_SIZE_MASK)) {
53262dadd65SYuri Pankov 		dp->txQueue.cmdRing.size = txQueueSize;
53362dadd65SYuri Pankov 		dp->txQueue.compRing.size = txQueueSize;
53462dadd65SYuri Pankov 		dp->txQueue.sharedCtrl = &tqdesc->ctrl;
5356849994eSSebastien Roy 		if ((err = vmxnet3_prepare_txqueue(dp)) != 0) {
5366849994eSSebastien Roy 			VMXNET3_WARN(dp, "vmxnet3_prepare_txqueue() failed: %d",
5376849994eSSebastien Roy 			    err);
53862dadd65SYuri Pankov 			goto error_shared_data;
53962dadd65SYuri Pankov 		}
54062dadd65SYuri Pankov 	} else {
54162dadd65SYuri Pankov 		VMXNET3_WARN(dp, "invalid tx ring size (%d)\n", txQueueSize);
5426849994eSSebastien Roy 		err = EINVAL;
54362dadd65SYuri Pankov 		goto error_shared_data;
54462dadd65SYuri Pankov 	}
54562dadd65SYuri Pankov 
54662dadd65SYuri Pankov 	/*
54762dadd65SYuri Pankov 	 * Create and initialize the rx queue
54862dadd65SYuri Pankov 	 */
54962dadd65SYuri Pankov 	rxQueueSize = vmxnet3_getprop(dp, "RxRingSize", 32, 4096,
55062dadd65SYuri Pankov 	    VMXNET3_DEF_RX_RING_SIZE);
55162dadd65SYuri Pankov 	if (!(rxQueueSize & VMXNET3_RING_SIZE_MASK)) {
55262dadd65SYuri Pankov 		dp->rxQueue.cmdRing.size = rxQueueSize;
55362dadd65SYuri Pankov 		dp->rxQueue.compRing.size = rxQueueSize;
55462dadd65SYuri Pankov 		dp->rxQueue.sharedCtrl = &rqdesc->ctrl;
5556849994eSSebastien Roy 		if ((err = vmxnet3_prepare_rxqueue(dp)) != 0) {
5566849994eSSebastien Roy 			VMXNET3_WARN(dp, "vmxnet3_prepare_rxqueue() failed: %d",
5576849994eSSebastien Roy 			    err);
55862dadd65SYuri Pankov 			goto error_tx_queue;
55962dadd65SYuri Pankov 		}
56062dadd65SYuri Pankov 	} else {
56162dadd65SYuri Pankov 		VMXNET3_WARN(dp, "invalid rx ring size (%d)\n", rxQueueSize);
5626849994eSSebastien Roy 		err = EINVAL;
56362dadd65SYuri Pankov 		goto error_tx_queue;
56462dadd65SYuri Pankov 	}
56562dadd65SYuri Pankov 
56662dadd65SYuri Pankov 	/*
56762dadd65SYuri Pankov 	 * Allocate the Tx DMA handle
56862dadd65SYuri Pankov 	 */
5696849994eSSebastien Roy 	if ((dmaerr = ddi_dma_alloc_handle(dp->dip, &vmxnet3_dma_attrs_tx,
5706849994eSSebastien Roy 	    DDI_DMA_SLEEP, NULL, &dp->txDmaHandle)) != DDI_SUCCESS) {
5716849994eSSebastien Roy 		VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed: %d", dmaerr);
5726849994eSSebastien Roy 		err = vmxnet3_dmaerr2errno(dmaerr);
57362dadd65SYuri Pankov 		goto error_rx_queue;
57462dadd65SYuri Pankov 	}
57562dadd65SYuri Pankov 
57662dadd65SYuri Pankov 	/*
57762dadd65SYuri Pankov 	 * Activate the device
57862dadd65SYuri Pankov 	 */
58062dadd65SYuri Pankov 	ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
58162dadd65SYuri Pankov 	if (ret32) {
58262dadd65SYuri Pankov 		VMXNET3_WARN(dp, "ACTIVATE_DEV failed: 0x%x\n", ret32);
5836849994eSSebastien Roy 		err = ENXIO;
58462dadd65SYuri Pankov 		goto error_txhandle;
58562dadd65SYuri Pankov 	}
58662dadd65SYuri Pankov 	dp->devEnabled = B_TRUE;
58762dadd65SYuri Pankov 
58862dadd65SYuri Pankov 	VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_RXPROD,
58962dadd65SYuri Pankov 	    dp->txQueue.cmdRing.size - 1);
59062dadd65SYuri Pankov 
59162dadd65SYuri Pankov 	/*
59262dadd65SYuri Pankov 	 * Update the RX filters, must be done after ACTIVATE_DEV
59362dadd65SYuri Pankov 	 */
59462dadd65SYuri Pankov 	dp->rxMode = VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST;
59562dadd65SYuri Pankov 	vmxnet3_refresh_rxfilter(dp);
59662dadd65SYuri Pankov 
59762dadd65SYuri Pankov 	/*
59862dadd65SYuri Pankov 	 * Get the link state now because no events will be generated
59962dadd65SYuri Pankov 	 */
60062dadd65SYuri Pankov 	vmxnet3_refresh_linkstate(dp);
60162dadd65SYuri Pankov 	mac_link_update(dp->mac, dp->linkState);
60262dadd65SYuri Pankov 
60362dadd65SYuri Pankov 	/*
60462dadd65SYuri Pankov 	 * Finally, unmask the interrupt
60562dadd65SYuri Pankov 	 */
60662dadd65SYuri Pankov 	VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 0);
60762dadd65SYuri Pankov 
6086849994eSSebastien Roy 	return (0);
60962dadd65SYuri Pankov 
61062dadd65SYuri Pankov error_txhandle:
61162dadd65SYuri Pankov 	ddi_dma_free_handle(&dp->txDmaHandle);
61262dadd65SYuri Pankov error_rx_queue:
61362dadd65SYuri Pankov 	vmxnet3_destroy_rxqueue(dp);
61462dadd65SYuri Pankov error_tx_queue:
61562dadd65SYuri Pankov 	vmxnet3_destroy_txqueue(dp);
61662dadd65SYuri Pankov error_shared_data:
61762dadd65SYuri Pankov 	vmxnet3_destroy_drivershared(dp);
61862dadd65SYuri Pankov error:
6196849994eSSebastien Roy 	return (err);
62062dadd65SYuri Pankov }
62162dadd65SYuri Pankov 
62262dadd65SYuri Pankov /*
623ca5345b6SSebastien Roy  * Stop a vmxnet3 device: send a stop command to the device and
624ca5345b6SSebastien Roy  * de-allocate the shared data structures.
62562dadd65SYuri Pankov  */
62662dadd65SYuri Pankov static void
vmxnet3_stop(void * data)62762dadd65SYuri Pankov vmxnet3_stop(void *data)
62862dadd65SYuri Pankov {
62962dadd65SYuri Pankov 	vmxnet3_softc_t *dp = data;
63062dadd65SYuri Pankov 
63162dadd65SYuri Pankov 	VMXNET3_DEBUG(dp, 1, "stop()\n");
63262dadd65SYuri Pankov 
63362dadd65SYuri Pankov 	/*
63462dadd65SYuri Pankov 	 * Take the 2 locks related to asynchronous events.
63562dadd65SYuri Pankov 	 * These events should always check dp->devEnabled before poking dp.
63662dadd65SYuri Pankov 	 */
63762dadd65SYuri Pankov 	mutex_enter(&dp->intrLock);
63862dadd65SYuri Pankov 	mutex_enter(&dp->rxPoolLock);
63962dadd65SYuri Pankov 	VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 1);
64062dadd65SYuri Pankov 	dp->devEnabled = B_FALSE;
64262dadd65SYuri Pankov 	mutex_exit(&dp->rxPoolLock);
64362dadd65SYuri Pankov 	mutex_exit(&dp->intrLock);
64462dadd65SYuri Pankov 
64562dadd65SYuri Pankov 	ddi_dma_free_handle(&dp->txDmaHandle);
64662dadd65SYuri Pankov 
64762dadd65SYuri Pankov 	vmxnet3_destroy_rxqueue(dp);
64862dadd65SYuri Pankov 	vmxnet3_destroy_txqueue(dp);
64962dadd65SYuri Pankov 
65062dadd65SYuri Pankov 	vmxnet3_destroy_drivershared(dp);
65162dadd65SYuri Pankov }
65262dadd65SYuri Pankov 
65362dadd65SYuri Pankov /*
654ca5345b6SSebastien Roy  * Set or unset promiscuous mode on a vmxnet3 device.
65562dadd65SYuri Pankov  */
65662dadd65SYuri Pankov static int
vmxnet3_setpromisc(void * data,boolean_t promisc)65762dadd65SYuri Pankov vmxnet3_setpromisc(void *data, boolean_t promisc)
65862dadd65SYuri Pankov {
65962dadd65SYuri Pankov 	vmxnet3_softc_t *dp = data;
66062dadd65SYuri Pankov 
66162dadd65SYuri Pankov 	VMXNET3_DEBUG(dp, 2, "setpromisc(%s)\n", promisc ? "TRUE" : "FALSE");
66262dadd65SYuri Pankov 
66362dadd65SYuri Pankov 	if (promisc) {
66462dadd65SYuri Pankov 		dp->rxMode |= VMXNET3_RXM_PROMISC;
66562dadd65SYuri Pankov 	} else {
66662dadd65SYuri Pankov 		dp->rxMode &= ~VMXNET3_RXM_PROMISC;
66762dadd65SYuri Pankov 	}
66862dadd65SYuri Pankov 
66962dadd65SYuri Pankov 	vmxnet3_refresh_rxfilter(dp);
67062dadd65SYuri Pankov 
6716849994eSSebastien Roy 	return (0);
67262dadd65SYuri Pankov }
67362dadd65SYuri Pankov 
67462dadd65SYuri Pankov /*
675ca5345b6SSebastien Roy  * Add or remove a multicast address from/to a vmxnet3 device.
67662dadd65SYuri Pankov  *
677ca5345b6SSebastien Roy  * Returns:
6786849994eSSebastien Roy  *	0 on success, non-zero on failure.
67962dadd65SYuri Pankov  */
68062dadd65SYuri Pankov static int
vmxnet3_multicst(void * data,boolean_t add,const uint8_t * macaddr)68162dadd65SYuri Pankov vmxnet3_multicst(void *data, boolean_t add, const uint8_t *macaddr)
68262dadd65SYuri Pankov {
68362dadd65SYuri Pankov 	vmxnet3_softc_t *dp = data;
68462dadd65SYuri Pankov 	vmxnet3_dmabuf_t newMfTable;
6856849994eSSebastien Roy 	int ret = 0;
68662dadd65SYuri Pankov 	uint16_t macIdx;
68762dadd65SYuri Pankov 	size_t allocSize;
68862dadd65SYuri Pankov 
68962dadd65SYuri Pankov 	VMXNET3_DEBUG(dp, 2, "multicst(%s, "MACADDR_FMT")\n",
69062dadd65SYuri Pankov 	    add ? "add" : "remove", MACADDR_FMT_ARGS(macaddr));
69162dadd65SYuri Pankov 
69262dadd65SYuri Pankov 	/*
69362dadd65SYuri Pankov 	 * First lookup the position of the given MAC to check if it is
69462dadd65SYuri Pankov 	 * present in the existing MF table.
69562dadd65SYuri Pankov 	 */
69662dadd65SYuri Pankov 	for (macIdx = 0; macIdx < dp->mfTable.bufLen; macIdx += 6) {
69762dadd65SYuri Pankov 		if (memcmp(&dp->mfTable.buf[macIdx], macaddr, 6) == 0) {
69862dadd65SYuri Pankov 			break;
69962dadd65SYuri Pankov 		}
70062dadd65SYuri Pankov 	}
70162dadd65SYuri Pankov 
70262dadd65SYuri Pankov 	/*
70362dadd65SYuri Pankov 	 * Check for 2 situations we can handle gracefully by bailing out:
70462dadd65SYuri Pankov 	 * Adding an already existing filter or removing a non-existing one.
70562dadd65SYuri Pankov 	 */
70662dadd65SYuri Pankov 	if (add && macIdx < dp->mfTable.bufLen) {
70762dadd65SYuri Pankov 		VMXNET3_WARN(dp, MACADDR_FMT " already in MC filter list "
70862dadd65SYuri Pankov 		    "@ %u\n", MACADDR_FMT_ARGS(macaddr), macIdx / 6);
70962dadd65SYuri Pankov 		ASSERT(B_FALSE);
71062dadd65SYuri Pankov 		goto done;
71162dadd65SYuri Pankov 	}
71262dadd65SYuri Pankov 	if (!add && macIdx == dp->mfTable.bufLen) {
71362dadd65SYuri Pankov 		VMXNET3_WARN(dp, MACADDR_FMT " not in MC filter list @ %u\n",
71462dadd65SYuri Pankov 		    MACADDR_FMT_ARGS(macaddr), macIdx / 6);
71562dadd65SYuri Pankov 		ASSERT(B_FALSE);
71662dadd65SYuri Pankov 		goto done;
71762dadd65SYuri Pankov 	}
71862dadd65SYuri Pankov 
71962dadd65SYuri Pankov 	/*
72062dadd65SYuri Pankov 	 * Create the new MF table
72162dadd65SYuri Pankov 	 */
72262dadd65SYuri Pankov 	allocSize = dp->mfTable.bufLen + (add ? 6 : -6);
72362dadd65SYuri Pankov 	if (allocSize) {
72462dadd65SYuri Pankov 		ret = vmxnet3_alloc_dma_mem_1(dp, &newMfTable, allocSize,
72562dadd65SYuri Pankov 		    B_TRUE);
7266849994eSSebastien Roy 		ASSERT(ret == 0);
72762dadd65SYuri Pankov 		if (add) {
72862dadd65SYuri Pankov 			(void) memcpy(newMfTable.buf, dp->mfTable.buf,
72962dadd65SYuri Pankov 			    dp->mfTable.bufLen);
73062dadd65SYuri Pankov 			(void) memcpy(newMfTable.buf + dp->mfTable.bufLen,
73162dadd65SYuri Pankov 			    macaddr, 6);