/*
 * Copyright (C) 2007-2014 VMware, Inc. All rights reserved.
 *
 * The contents of this file are subject to the terms of the Common
 * Development and Distribution License (the "License") version 1.0
 * and no later version.  You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at
 *         http://www.opensource.org/licenses/cddl1.php
 *
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <vmxnet3.h>

/*
 * This driver is based on VMware's version 3227872, and contains additional
 * enhancements (see README.txt).
 */
#define	BUILD_NUMBER_NUMERIC	3227872

/*
 * If we run out of rxPool buffers, only allocate if the MTU is <= PAGESIZE
 * so that we don't have to incur the cost of allocating multiple contiguous
 * pages (very slow) in interrupt context.
 */
#define	VMXNET3_ALLOC_OK(dp)	((dp)->cur_mtu <= PAGESIZE)
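/*
 * For example, with the standard 1500-byte MTU a receive buffer should fit
 * in a single page, whereas a jumbo MTU larger than PAGESIZE would force a
 * multi-page contiguous allocation, which we avoid in interrupt context.
 */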

/*
 * TODO:
 *    - Tx data ring
 *    - MAC_CAPAB_POLL support
 *    - Dynamic RX pool
 */

static int vmxnet3_getstat(void *, uint_t, uint64_t *);
static int vmxnet3_start(void *);
static void vmxnet3_stop(void *);
static int vmxnet3_setpromisc(void *, boolean_t);
static void vmxnet3_ioctl(void *arg, queue_t *wq, mblk_t *mp);
static int vmxnet3_multicst(void *, boolean_t, const uint8_t *);
static int vmxnet3_unicst(void *, const uint8_t *);
static boolean_t vmxnet3_getcapab(void *, mac_capab_t, void *);
static int vmxnet3_get_prop(void *, const char *, mac_prop_id_t, uint_t,
    void *);
static int vmxnet3_set_prop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static void vmxnet3_prop_info(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);

int vmxnet3s_debug = 0;

/* MAC callbacks */
static mac_callbacks_t vmxnet3_mac_callbacks = {
	.mc_callbacks =	MC_GETCAPAB | MC_IOCTL | MC_SETPROP | MC_PROPINFO,
	.mc_getstat =	vmxnet3_getstat,
	.mc_start =	vmxnet3_start,
	.mc_stop =	vmxnet3_stop,
	.mc_setpromisc = vmxnet3_setpromisc,
	.mc_multicst =	vmxnet3_multicst,
	.mc_unicst =	vmxnet3_unicst,
	.mc_tx =	vmxnet3_tx,
	.mc_ioctl =	vmxnet3_ioctl,
	.mc_getcapab =	vmxnet3_getcapab,
	.mc_getprop =	vmxnet3_get_prop,
	.mc_setprop =	vmxnet3_set_prop,
	.mc_propinfo =	vmxnet3_prop_info
};

/* Tx DMA engine description */
static ddi_dma_attr_t vmxnet3_dma_attrs_tx = {
	.dma_attr_version =	DMA_ATTR_V0,
	.dma_attr_addr_lo =	0x0000000000000000ull,
	.dma_attr_addr_hi =	0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max =	0xFFFFFFFFFFFFFFFFull,
	.dma_attr_align =	0x0000000000000001ull,
	.dma_attr_burstsizes =	0x0000000000000001ull,
	.dma_attr_minxfer =	0x00000001,
	.dma_attr_maxxfer =	0x000000000000FFFFull,
	.dma_attr_seg =		0xFFFFFFFFFFFFFFFFull,
	.dma_attr_sgllen =	-1,
	.dma_attr_granular =	0x00000001,
	.dma_attr_flags =	0
};
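/*
 * Note: vmxnet3_start() allocates dp->txDmaHandle from these attributes for
 * the transmit path; the handle is freed again in vmxnet3_stop().
 */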

/* --- */

/*
 * Fetch the statistics of a vmxnet3 device.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_getstat(void *data, uint_t stat, uint64_t *val)
{
	vmxnet3_softc_t *dp = data;
	UPT1_TxStats *txStats;
	UPT1_RxStats *rxStats;

	VMXNET3_DEBUG(dp, 3, "getstat(%u)\n", stat);

	if (!dp->devEnabled) {
		return (EBUSY);
	}

	txStats = &VMXNET3_TQDESC(dp)->stats;
	rxStats = &VMXNET3_RQDESC(dp)->stats;

	/*
	 * First touch the related register
	 */
	switch (stat) {
	case MAC_STAT_MULTIRCV:
	case MAC_STAT_BRDCSTRCV:
	case MAC_STAT_MULTIXMT:
	case MAC_STAT_BRDCSTXMT:
	case MAC_STAT_NORCVBUF:
	case MAC_STAT_IERRORS:
	case MAC_STAT_NOXMTBUF:
	case MAC_STAT_OERRORS:
	case MAC_STAT_RBYTES:
	case MAC_STAT_IPACKETS:
	case MAC_STAT_OBYTES:
	case MAC_STAT_OPACKETS:
		VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
		break;
	case MAC_STAT_IFSPEED:
	case MAC_STAT_COLLISIONS:
	case ETHER_STAT_LINK_DUPLEX:
		/* nothing */
		break;
	default:
		return (ENOTSUP);
	}

	/*
	 * Then fetch the corresponding stat
	 */
	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = dp->linkSpeed;
		break;
	case MAC_STAT_MULTIRCV:
		*val = rxStats->mcastPktsRxOK;
		break;
	case MAC_STAT_BRDCSTRCV:
		*val = rxStats->bcastPktsRxOK;
		break;
	case MAC_STAT_MULTIXMT:
		*val = txStats->mcastPktsTxOK;
		break;
	case MAC_STAT_BRDCSTXMT:
		*val = txStats->bcastPktsTxOK;
		break;
	case MAC_STAT_NORCVBUF:
		*val = rxStats->pktsRxOutOfBuf + dp->rx_alloc_failed;
		break;
	case MAC_STAT_IERRORS:
		*val = rxStats->pktsRxError;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = txStats->pktsTxDiscard + dp->tx_pullup_failed;
		break;
	case MAC_STAT_OERRORS:
		*val = txStats->pktsTxError + dp->tx_error;
		break;
	case MAC_STAT_COLLISIONS:
		*val = 0;
		break;
	case MAC_STAT_RBYTES:
		*val = rxStats->ucastBytesRxOK + rxStats->mcastBytesRxOK +
		    rxStats->bcastBytesRxOK;
		break;
	case MAC_STAT_IPACKETS:
		*val = rxStats->ucastPktsRxOK + rxStats->mcastPktsRxOK +
		    rxStats->bcastPktsRxOK;
		break;
	case MAC_STAT_OBYTES:
		*val = txStats->ucastBytesTxOK + txStats->mcastBytesTxOK +
		    txStats->bcastBytesTxOK;
		break;
	case MAC_STAT_OPACKETS:
		*val = txStats->ucastPktsTxOK + txStats->mcastPktsTxOK +
		    txStats->bcastPktsTxOK;
		break;
	case ETHER_STAT_LINK_DUPLEX:
		*val = LINK_DUPLEX_FULL;
		break;
	default:
		ASSERT(B_FALSE);
	}

	return (0);
}

/*
 * Allocate and initialize the shared data structures of a vmxnet3 device.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_prepare_drivershared(vmxnet3_softc_t *dp)
{
	Vmxnet3_DriverShared *ds;
	size_t allocSize = sizeof (Vmxnet3_DriverShared);
	int err;

	if ((err = vmxnet3_alloc_dma_mem_1(dp, &dp->sharedData, allocSize,
	    B_TRUE)) != 0) {
		return (err);
	}
	ds = VMXNET3_DS(dp);
	(void) memset(ds, 0, allocSize);

	allocSize = sizeof (Vmxnet3_TxQueueDesc) + sizeof (Vmxnet3_RxQueueDesc);
	if ((err = vmxnet3_alloc_dma_mem_128(dp, &dp->queueDescs, allocSize,
	    B_TRUE)) != 0) {
		vmxnet3_free_dma_mem(&dp->sharedData);
		return (err);
	}
	(void) memset(dp->queueDescs.buf, 0, allocSize);

	ds->magic = VMXNET3_REV1_MAGIC;

	/* Take care of most of devRead */
	ds->devRead.misc.driverInfo.version = BUILD_NUMBER_NUMERIC;
#ifdef _LP64
	ds->devRead.misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_64;
#else
	ds->devRead.misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_32;
#endif
	ds->devRead.misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_SOLARIS;
	ds->devRead.misc.driverInfo.gos.gosVer = 10;
	ds->devRead.misc.driverInfo.vmxnet3RevSpt = 1;
	ds->devRead.misc.driverInfo.uptVerSpt = 1;

	ds->devRead.misc.uptFeatures = UPT1_F_RXCSUM;
	ds->devRead.misc.mtu = dp->cur_mtu;

	/* XXX: ds->devRead.misc.maxNumRxSG */
	ds->devRead.misc.numTxQueues = 1;
	ds->devRead.misc.numRxQueues = 1;
	ds->devRead.misc.queueDescPA = dp->queueDescs.bufPA;
	ds->devRead.misc.queueDescLen = allocSize;

	/* TxQueue and RxQueue information is filled in other functions */
	ds->devRead.intrConf.autoMask = (dp->intrMaskMode == VMXNET3_IMM_AUTO);
	ds->devRead.intrConf.numIntrs = 1;
	/* XXX: ds->intr.modLevels */
	ds->devRead.intrConf.eventIntrIdx = 0;

	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAL,
	    VMXNET3_ADDR_LO(dp->sharedData.bufPA));
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAH,
	    VMXNET3_ADDR_HI(dp->sharedData.bufPA));

	return (0);
}

/*
 * Destroy the shared data structures of a vmxnet3 device.
 */
static void
vmxnet3_destroy_drivershared(vmxnet3_softc_t *dp)
{
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAL, 0);
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAH, 0);

	vmxnet3_free_dma_mem(&dp->queueDescs);
	vmxnet3_free_dma_mem(&dp->sharedData);
}

/*
 * Allocate and initialize the command ring of a queue.
 *
 * Returns:
 *	0 on success, non-zero on error.
 */
static int
vmxnet3_alloc_cmdring(vmxnet3_softc_t *dp, vmxnet3_cmdring_t *cmdRing)
{
	size_t ringSize = cmdRing->size * sizeof (Vmxnet3_TxDesc);
	int err;

	if ((err = vmxnet3_alloc_dma_mem_512(dp, &cmdRing->dma, ringSize,
	    B_TRUE)) != 0) {
		return (err);
	}
	(void) memset(cmdRing->dma.buf, 0, ringSize);
	cmdRing->avail = cmdRing->size;
	cmdRing->next2fill = 0;
	cmdRing->gen = VMXNET3_INIT_GEN;

	return (0);
}

/*
 * Allocate and initialize the completion ring of a queue.
 *
 * Returns:
 *    DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_alloc_compring(vmxnet3_softc_t *dp, vmxnet3_compring_t *compRing)
{
	size_t ringSize = compRing->size * sizeof (Vmxnet3_TxCompDesc);

	if (vmxnet3_alloc_dma_mem_512(dp, &compRing->dma, ringSize,
	    B_TRUE) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	(void) memset(compRing->dma.buf, 0, ringSize);
	compRing->next2comp = 0;
	compRing->gen = VMXNET3_INIT_GEN;

	return (DDI_SUCCESS);
}

/*
 * Initialize the tx queue of a vmxnet3 device.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_prepare_txqueue(vmxnet3_softc_t *dp)
{
	Vmxnet3_TxQueueDesc *tqdesc = VMXNET3_TQDESC(dp);
	vmxnet3_txqueue_t *txq = &dp->txQueue;
	int err;

	ASSERT(!(txq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!(txq->compRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!txq->cmdRing.dma.buf && !txq->compRing.dma.buf);

	if ((err = vmxnet3_alloc_cmdring(dp, &txq->cmdRing)) != 0) {
		goto error;
	}
	tqdesc->conf.txRingBasePA = txq->cmdRing.dma.bufPA;
	tqdesc->conf.txRingSize = txq->cmdRing.size;
	tqdesc->conf.dataRingBasePA = 0;
	tqdesc->conf.dataRingSize = 0;

	if ((err = vmxnet3_alloc_compring(dp, &txq->compRing)) != 0) {
		goto error_cmdring;
	}
	tqdesc->conf.compRingBasePA = txq->compRing.dma.bufPA;
	tqdesc->conf.compRingSize = txq->compRing.size;

	txq->metaRing = kmem_zalloc(txq->cmdRing.size *
	    sizeof (vmxnet3_metatx_t), KM_SLEEP);
	ASSERT(txq->metaRing);

	if ((err = vmxnet3_txqueue_init(dp, txq)) != 0) {
		goto error_mpring;
	}

	return (0);

error_mpring:
	kmem_free(txq->metaRing, txq->cmdRing.size * sizeof (vmxnet3_metatx_t));
	vmxnet3_free_dma_mem(&txq->compRing.dma);
error_cmdring:
	vmxnet3_free_dma_mem(&txq->cmdRing.dma);
error:
	return (err);
}

/*
 * Initialize the rx queue of a vmxnet3 device.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_prepare_rxqueue(vmxnet3_softc_t *dp)
{
	Vmxnet3_RxQueueDesc *rqdesc = VMXNET3_RQDESC(dp);
	vmxnet3_rxqueue_t *rxq = &dp->rxQueue;
	int err = 0;

	ASSERT(!(rxq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!(rxq->compRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!rxq->cmdRing.dma.buf && !rxq->compRing.dma.buf);

	if ((err = vmxnet3_alloc_cmdring(dp, &rxq->cmdRing)) != 0) {
		goto error;
	}
	rqdesc->conf.rxRingBasePA[0] = rxq->cmdRing.dma.bufPA;
	rqdesc->conf.rxRingSize[0] = rxq->cmdRing.size;
	rqdesc->conf.rxRingBasePA[1] = 0;
	rqdesc->conf.rxRingSize[1] = 0;

	if ((err = vmxnet3_alloc_compring(dp, &rxq->compRing)) != 0) {
		goto error_cmdring;
	}
	rqdesc->conf.compRingBasePA = rxq->compRing.dma.bufPA;
	rqdesc->conf.compRingSize = rxq->compRing.size;

	rxq->bufRing = kmem_zalloc(rxq->cmdRing.size *
	    sizeof (vmxnet3_bufdesc_t), KM_SLEEP);
	ASSERT(rxq->bufRing);

	if ((err = vmxnet3_rxqueue_init(dp, rxq)) != 0) {
		goto error_bufring;
	}

	return (0);

error_bufring:
	kmem_free(rxq->bufRing, rxq->cmdRing.size * sizeof (vmxnet3_bufdesc_t));
	vmxnet3_free_dma_mem(&rxq->compRing.dma);
error_cmdring:
	vmxnet3_free_dma_mem(&rxq->cmdRing.dma);
error:
	return (err);
}

/*
 * Destroy the tx queue of a vmxnet3 device.
 */
static void
vmxnet3_destroy_txqueue(vmxnet3_softc_t *dp)
{
	vmxnet3_txqueue_t *txq = &dp->txQueue;

	ASSERT(txq->metaRing);
	ASSERT(txq->cmdRing.dma.buf && txq->compRing.dma.buf);

	vmxnet3_txqueue_fini(dp, txq);

	kmem_free(txq->metaRing, txq->cmdRing.size * sizeof (vmxnet3_metatx_t));

	vmxnet3_free_dma_mem(&txq->cmdRing.dma);
	vmxnet3_free_dma_mem(&txq->compRing.dma);
}

/*
 * Destroy the rx queue of a vmxnet3 device.
 */
static void
vmxnet3_destroy_rxqueue(vmxnet3_softc_t *dp)
{
	vmxnet3_rxqueue_t *rxq = &dp->rxQueue;

	ASSERT(rxq->bufRing);
	ASSERT(rxq->cmdRing.dma.buf && rxq->compRing.dma.buf);

	vmxnet3_rxqueue_fini(dp, rxq);

	kmem_free(rxq->bufRing, rxq->cmdRing.size * sizeof (vmxnet3_bufdesc_t));

	vmxnet3_free_dma_mem(&rxq->cmdRing.dma);
	vmxnet3_free_dma_mem(&rxq->compRing.dma);
}

/*
 * Apply new RX filter settings to a vmxnet3 device.
 */
static void
vmxnet3_refresh_rxfilter(vmxnet3_softc_t *dp)
{
	Vmxnet3_DriverShared *ds = VMXNET3_DS(dp);

	ds->devRead.rxFilterConf.rxMode = dp->rxMode;
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
}

/*
 * Fetch the link state of a vmxnet3 device.
 */
static void
vmxnet3_refresh_linkstate(vmxnet3_softc_t *dp)
{
	uint32_t ret32;

	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
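	/*
	 * Bit 0 of the result indicates whether the link is up; the upper
	 * 16 bits carry the link speed in Mbps, which we convert to bps.
	 */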
	if (ret32 & 1) {
		dp->linkState = LINK_STATE_UP;
		dp->linkSpeed = (ret32 >> 16) * 1000000ULL;
	} else {
		dp->linkState = LINK_STATE_DOWN;
		dp->linkSpeed = 0;
	}
}

/*
 * Start a vmxnet3 device: allocate and initialize the shared data
 * structures and send a start command to the device.
 *
 * Returns:
 *	0 on success, non-zero error on failure.
 */
static int
vmxnet3_start(void *data)
{
	vmxnet3_softc_t *dp = data;
	Vmxnet3_TxQueueDesc *tqdesc;
	Vmxnet3_RxQueueDesc *rqdesc;
	int txQueueSize, rxQueueSize;
	uint32_t ret32;
	int err, dmaerr;

	VMXNET3_DEBUG(dp, 1, "start()\n");

	/*
	 * Allocate vmxnet3's shared data and advertise its PA
	 */
	if ((err = vmxnet3_prepare_drivershared(dp)) != 0) {
		VMXNET3_WARN(dp, "vmxnet3_prepare_drivershared() failed: %d",
		    err);
		goto error;
	}
	tqdesc = VMXNET3_TQDESC(dp);
	rqdesc = VMXNET3_RQDESC(dp);

	/*
	 * Create and initialize the tx queue
	 */
	txQueueSize = vmxnet3_getprop(dp, "TxRingSize", 32, 4096,
	    VMXNET3_DEF_TX_RING_SIZE);
	if (!(txQueueSize & VMXNET3_RING_SIZE_MASK)) {
		dp->txQueue.cmdRing.size = txQueueSize;
		dp->txQueue.compRing.size = txQueueSize;
		dp->txQueue.sharedCtrl = &tqdesc->ctrl;
		if ((err = vmxnet3_prepare_txqueue(dp)) != 0) {
			VMXNET3_WARN(dp, "vmxnet3_prepare_txqueue() failed: %d",
			    err);
			goto error_shared_data;
		}
	} else {
		VMXNET3_WARN(dp, "invalid tx ring size (%d)\n", txQueueSize);
		err = EINVAL;
		goto error_shared_data;
	}

	/*
	 * Create and initialize the rx queue
	 */
	rxQueueSize = vmxnet3_getprop(dp, "RxRingSize", 32, 4096,
	    VMXNET3_DEF_RX_RING_SIZE);
	if (!(rxQueueSize & VMXNET3_RING_SIZE_MASK)) {
		dp->rxQueue.cmdRing.size = rxQueueSize;
		dp->rxQueue.compRing.size = rxQueueSize;
		dp->rxQueue.sharedCtrl = &rqdesc->ctrl;
		if ((err = vmxnet3_prepare_rxqueue(dp)) != 0) {
			VMXNET3_WARN(dp, "vmxnet3_prepare_rxqueue() failed: %d",
			    err);
			goto error_tx_queue;
		}
	} else {
		VMXNET3_WARN(dp, "invalid rx ring size (%d)\n", rxQueueSize);
		err = EINVAL;
		goto error_tx_queue;
	}

	/*
	 * Allocate the Tx DMA handle
	 */
	if ((dmaerr = ddi_dma_alloc_handle(dp->dip, &vmxnet3_dma_attrs_tx,
	    DDI_DMA_SLEEP, NULL, &dp->txDmaHandle)) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed: %d", dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error_rx_queue;
	}

	/*
	 * Activate the device
	 */
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
	if (ret32) {
		VMXNET3_WARN(dp, "ACTIVATE_DEV failed: 0x%x\n", ret32);
		err = ENXIO;
		goto error_txhandle;
	}
	dp->devEnabled = B_TRUE;

	VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_RXPROD,
	    dp->txQueue.cmdRing.size - 1);

	/*
	 * Update the RX filters, must be done after ACTIVATE_DEV
	 */
	dp->rxMode = VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST;
	vmxnet3_refresh_rxfilter(dp);

	/*
	 * Get the link state now because no events will be generated
	 */
	vmxnet3_refresh_linkstate(dp);
	mac_link_update(dp->mac, dp->linkState);

	/*
	 * Finally, unmask the interrupt
	 */
	VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 0);

	return (0);

error_txhandle:
	ddi_dma_free_handle(&dp->txDmaHandle);
error_rx_queue:
	vmxnet3_destroy_rxqueue(dp);
error_tx_queue:
	vmxnet3_destroy_txqueue(dp);
error_shared_data:
	vmxnet3_destroy_drivershared(dp);
error:
	return (err);
}

/*
 * Stop a vmxnet3 device: send a stop command to the device and
 * de-allocate the shared data structures.
 */
static void
vmxnet3_stop(void *data)
{
	vmxnet3_softc_t *dp = data;

	VMXNET3_DEBUG(dp, 1, "stop()\n");

	/*
	 * Take the 2 locks related to asynchronous events.
	 * These events should always check dp->devEnabled before poking dp.
	 */
	mutex_enter(&dp->intrLock);
	mutex_enter(&dp->rxPoolLock);
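	/*
	 * Writing 1 to IMR masks the device interrupt; vmxnet3_start()
	 * unmasks it again by writing 0.
	 */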
	VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 1);
	dp->devEnabled = B_FALSE;
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	mutex_exit(&dp->rxPoolLock);
	mutex_exit(&dp->intrLock);

	ddi_dma_free_handle(&dp->txDmaHandle);

	vmxnet3_destroy_rxqueue(dp);
	vmxnet3_destroy_txqueue(dp);

	vmxnet3_destroy_drivershared(dp);
}

/*
 * Set or unset promiscuous mode on a vmxnet3 device.
 */
static int
vmxnet3_setpromisc(void *data, boolean_t promisc)
{
	vmxnet3_softc_t *dp = data;

	VMXNET3_DEBUG(dp, 2, "setpromisc(%s)\n", promisc ? "TRUE" : "FALSE");

	if (promisc) {
		dp->rxMode |= VMXNET3_RXM_PROMISC;
	} else {
		dp->rxMode &= ~VMXNET3_RXM_PROMISC;
	}

	vmxnet3_refresh_rxfilter(dp);

	return (0);
}

/*
 * Add a multicast address to, or remove one from, a vmxnet3 device.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_multicst(void *data, boolean_t add, const uint8_t *macaddr)
{
	vmxnet3_softc_t *dp = data;
	vmxnet3_dmabuf_t newMfTable;
	int ret = 0;
	uint16_t macIdx;
	size_t allocSize;

	VMXNET3_DEBUG(dp, 2, "multicst(%s, "MACADDR_FMT")\n",
	    add ? "add" : "remove", MACADDR_FMT_ARGS(macaddr));

	/*
	 * First lookup the position of the given MAC to check if it is
	 * present in the existing MF table.
	 */
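	/* Each MF table entry is a 6-byte Ethernet address, hence the stride */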
	for (macIdx = 0; macIdx < dp->mfTable.bufLen; macIdx += 6) {
		if (memcmp(&dp->mfTable.buf[macIdx], macaddr, 6) == 0) {
			break;
		}
	}

	/*
	 * Check for 2 situations we can handle gracefully by bailing out:
	 * Adding an already existing filter or removing a non-existing one.
	 */
	if (add && macIdx < dp->mfTable.bufLen) {
		VMXNET3_WARN(dp, MACADDR_FMT " already in MC filter list "
		    "@ %u\n", MACADDR_FMT_ARGS(macaddr), macIdx / 6);
		ASSERT(B_FALSE);
		goto done;
	}
	if (!add && macIdx == dp->mfTable.bufLen) {
		VMXNET3_WARN(dp, MACADDR_FMT " not in MC filter list @ %u\n",
		    MACADDR_FMT_ARGS(macaddr), macIdx / 6);
		ASSERT(B_FALSE);
		goto done;
	}

	/*
	 * Create the new MF table
	 */
	allocSize = dp->mfTable.bufLen + (add ? 6 : -6);
	if (allocSize) {
		ret = vmxnet3_alloc_dma_mem_1(dp, &newMfTable, allocSize,
		    B_TRUE);
		ASSERT(ret == 0);
		if (add) {
			(void) memcpy(newMfTable.buf, dp->mfTable.buf,
			    dp->mfTable.bufLen);
			(void) memcpy(newMfTable.buf + dp->mfTable.bufLen,
			    macaddr, 6);
		} else {
			(void) memcpy(newMfTable.buf, dp->mfTable.buf,
			    macIdx);
			(void) memcpy(newMfTable.buf + macIdx,
			    dp->mfTable.buf + macIdx + 6,
			    dp->mfTable.bufLen - macIdx - 6);
		}
	} else {
		newMfTable.buf = NULL;
		newMfTable.bufPA = 0;
		newMfTable.bufLen = 0;
	}

	/*
	 * Now handle 2 corner cases: if we're creating the first filter or
	 * removing the last one, we have to update rxMode accordingly.
	 */
	if (add && newMfTable.bufLen == 6) {
		ASSERT(!(dp->rxMode & VMXNET3_RXM_MCAST));
		dp->rxMode |= VMXNET3_RXM_MCAST;
		vmxnet3_refresh_rxfilter(dp);
	}
	if (!add && dp->mfTable.bufLen == 6) {
		ASSERT(newMfTable.buf == NULL);
		ASSERT(dp->rxMode & VMXNET3_RXM_MCAST);
		dp->rxMode &= ~VMXNET3_RXM_MCAST;
		vmxnet3_refresh_rxfilter(dp);
	}

	/*
	 * Now replace the old MF table with the new one
	 */
	if (dp->mfTable.buf) {
		vmxnet3_free_dma_mem(&dp->mfTable);
	}
	dp->mfTable = newMfTable;
	VMXNET3_DS(dp)->devRead.rxFilterConf.mfTablePA = newMfTable.bufPA;
	VMXNET3_DS(dp)->devRead.rxFilterConf.mfTableLen = newMfTable.bufLen;

done:
	/* Always update the filters */
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_MAC_FILTERS);

	return (ret);
}

/*
 * Set the mac address of a vmxnet3 device.
 *
 * Returns:
 *	0
 */
static int
vmxnet3_unicst(void *data, const uint8_t *macaddr)
{
	vmxnet3_softc_t *dp = data;
	uint32_t val32;

	VMXNET3_DEBUG(dp, 2, "unicst("MACADDR_FMT")\n",
	    MACADDR_FMT_ARGS(macaddr));

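	/*
	 * The station address is programmed as two registers: the low four
	 * bytes go into MACL and the remaining two bytes into MACH.
	 */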
	val32 = *((uint32_t *)(macaddr + 0));
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_MACL, val32);
	val32 = *((uint16_t *)(macaddr + 4));
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_MACH, val32);

	(void) memcpy(dp->macaddr, macaddr, 6);

	return (0);
}

/*
 * Change the MTU as seen by the driver. This is only supported when
 * the mac is stopped.
 *
 * Returns:
 *	EBUSY if the device is enabled.
 *	EINVAL for invalid MTU values.
 *	0 on success.
 */
static int
vmxnet3_change_mtu(vmxnet3_softc_t *dp, uint32_t new_mtu)
{
	int ret;

	if (dp->devEnabled)
		return (EBUSY);

	if (new_mtu == dp->cur_mtu) {
		VMXNET3_WARN(dp, "New MTU is same as old mtu : %d.\n", new_mtu);
		return (0);
	}

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) {
		VMXNET3_WARN(dp, "New MTU not in valid range [%d, %d].\n",
		    VMXNET3_MIN_MTU, VMXNET3_MAX_MTU);
		return (EINVAL);
	} else if (new_mtu > ETHERMTU && !dp->allow_jumbo) {
		VMXNET3_WARN(dp, "MTU cannot be greater than %d because "
		    "accept-jumbo is not enabled.\n", ETHERMTU);
		return (EINVAL);
	}

	dp->cur_mtu = new_mtu;
	dp->alloc_ok = VMXNET3_ALLOC_OK(dp);

	if ((ret = mac_maxsdu_update(dp->mac, new_mtu)) != 0)
		VMXNET3_WARN(dp, "Unable to update mac with %d mtu: %d",
		    new_mtu, ret);

	return (ret);
}

/* ARGSUSED */
static int
vmxnet3_get_prop(void *data, const char *prop_name, mac_prop_id_t prop_id,
    uint_t prop_val_size, void *prop_val)
{
	vmxnet3_softc_t *dp = data;
	int ret = 0;

	switch (prop_id) {
	case MAC_PROP_MTU:
		ASSERT(prop_val_size >= sizeof (uint32_t));
		bcopy(&dp->cur_mtu, prop_val, sizeof (uint32_t));
		break;
	default:
		ret = ENOTSUP;
		break;
	}
	return (ret);
}

/* ARGSUSED */
static int
vmxnet3_set_prop(void *data, const char *prop_name, mac_prop_id_t prop_id,
    uint_t prop_val_size, const void *prop_val)
{
	vmxnet3_softc_t *dp = data;
	int ret;

	switch (prop_id) {
	case MAC_PROP_MTU: {
		uint32_t new_mtu;
		ASSERT(prop_val_size >= sizeof (uint32_t));
		bcopy(prop_val, &new_mtu, sizeof (new_mtu));
		ret = vmxnet3_change_mtu(dp, new_mtu);
		break;
	}
	default:
		ret = ENOTSUP;
		break;
	}

	return (ret);
}

/* ARGSUSED */
static void
vmxnet3_prop_info(void *data, const char *prop_name, mac_prop_id_t prop_id,
    mac_prop_info_handle_t prop_handle)
{
	switch (prop_id) {
	case MAC_PROP_MTU:
		mac_prop_info_set_range_uint32(prop_handle, VMXNET3_MIN_MTU,
		    VMXNET3_MAX_MTU);
		break;
	default:
		break;
	}
}

/*
 * DDI/DDK callback to handle ioctls in the driver. Only the ND_SET ioctl is
 * handled; all others are ignored. ND_SET is used to set or reset the
 * accept-jumbo ndd parameter for the interface.
 *
 * Side effects:
 *	The MTU can be changed and the device can be reset. An ACK or NACK is
 *	conveyed to the caller through the mblk that was used to issue this
 *	ioctl.
 */
static void
vmxnet3_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	vmxnet3_softc_t *dp = arg;
	int ret = EINVAL;
	IOCP iocp;
	mblk_t *mp1;
	char *valp, *param;
	int data;

	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;

	switch (iocp->ioc_cmd) {
	case ND_SET:
		/*
		 * The mblk in continuation would contain the ndd parameter name
		 * and data value to be set
		 */
		mp1 = mp->b_cont;
		if (!mp1) {
			VMXNET3_WARN(dp, "Error locating parameter name.\n");
			ret = EINVAL;
			break;
		}

		/* Force null termination */
		mp1->b_datap->db_lim[-1] = '\0';

		/*
		 * From /usr/src/uts/common/inet/nd.c : nd_getset()
		 * "logic throughout nd_xxx assumes single data block for ioctl.
		 *  However, existing code sends in some big buffers."
		 */
		if (mp1->b_cont) {
			freemsg(mp1->b_cont);
			mp1->b_cont = NULL;
		}

		valp = (char *)mp1->b_rptr;	/* Points to param name */
		ASSERT(valp);
		param = valp;
		VMXNET3_DEBUG(dp, 3, "ND Set ioctl for %s\n", param);

		/*
		 * Go past the end of this null terminated string to get the
		 * data value.
		 */
		while (*valp && valp <= (char *)mp1->b_wptr)
			valp++;

		if (valp > (char *)mp1->b_wptr) {
			/*
			 * We are already beyond the readable area of mblk and
			 * still haven't found the end of param string.
			 */
			VMXNET3_WARN(dp,
			    "No data value found to be set to param\n");
			data = -1;
		} else {
			/* Now this points to data string */
			valp++;
			/* Get numeric value of first letter */
			data = (int)*valp - (int)'0';
		}

		if (strcmp("accept-jumbo", param) == 0) {
			if (data == 1) {
				VMXNET3_DEBUG(dp, 2,
				    "Accepting jumbo frames\n");
				dp->allow_jumbo = B_TRUE;
				ret = vmxnet3_change_mtu(dp, VMXNET3_MAX_MTU);
			} else if (data == 0) {
				VMXNET3_DEBUG(dp, 2,
				    "Rejecting jumbo frames\n");
				dp->allow_jumbo = B_FALSE;
				ret = vmxnet3_change_mtu(dp, ETHERMTU);
			} else {
				VMXNET3_WARN(dp, "Invalid data value to be set,"
				    " use 0 or 1\n");
				ret = -1;
			}
		}
		freemsg(mp1);
		mp->b_cont = NULL;
		break;

	default:
		if (mp->b_cont) {
			freemsg(mp->b_cont);
			mp->b_cont = NULL;
		}
		ret = -1;
		break;
	}

	if (ret == 0)
		miocack(wq, mp, 0, 0);
	else
		miocnak(wq, mp, 0, EINVAL);
}

/*
 * Get the capabilities of a vmxnet3 device.
 *
 * Returns:
 *	B_TRUE if the capability is supported, B_FALSE otherwise.
 */
static boolean_t
vmxnet3_getcapab(void *data, mac_capab_t capab, void *arg)
{
	vmxnet3_softc_t *dp = data;
	boolean_t ret;

	switch (capab) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = arg;
		*txflags = HCKSUM_INET_PARTIAL;
		ret = B_TRUE;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *lso = arg;
		lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
		lso->lso_basic_tcp_ipv4.lso_max = IP_MAXPACKET;
		ret = vmxnet3_getprop(dp, "EnableLSO", 0, 1, 1);
		break;
	}
	default:
		ret = B_FALSE;
	}

	VMXNET3_DEBUG(dp, 2, "getcapab(0x%x) -> %s\n", capab,
	    ret ? "yes" : "no");

	return (ret);
}

/*
 * Reset a vmxnet3 device. Only to be used when the device is wedged.
 *
 * Side effects:
 *	The device is reset.
 */
static void
vmxnet3_reset(void *data)
{
	int ret;

	vmxnet3_softc_t *dp = data;

	VMXNET3_DEBUG(dp, 1, "vmxnet3_reset()\n");

	atomic_inc_32(&dp->reset_count);
	vmxnet3_stop(dp);
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	if ((ret = vmxnet3_start(dp)) != 0)
		VMXNET3_WARN(dp, "failed to reset the device: %d", ret);
}

/*
 * Process pending events on a vmxnet3 device.
 *
 * Returns:
 *	B_TRUE if the link state changed, B_FALSE otherwise.
 */
static boolean_t
vmxnet3_intr_events(vmxnet3_softc_t *dp)
{
	Vmxnet3_DriverShared *ds = VMXNET3_DS(dp);
	boolean_t linkStateChanged = B_FALSE;
	uint32_t events = ds->ecr;

	if (events) {
		VMXNET3_DEBUG(dp, 2, "events(0x%x)\n", events);
		if (events & (VMXNET3_ECR_RQERR | VMXNET3_ECR_TQERR)) {
			Vmxnet3_TxQueueDesc *tqdesc = VMXNET3_TQDESC(dp);
			Vmxnet3_RxQueueDesc *rqdesc = VMXNET3_RQDESC(dp);

			VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD,
			    VMXNET3_CMD_GET_QUEUE_STATUS);
			if (tqdesc->status.stopped) {
				VMXNET3_WARN(dp, "tq error 0x%x\n",
				    tqdesc->status.error);
			}
			if (rqdesc->status.stopped) {
				VMXNET3_WARN(dp, "rq error 0x%x\n",
				    rqdesc->status.error);
			}

			if (ddi_taskq_dispatch(dp->resetTask, vmxnet3_reset,
			    dp, DDI_NOSLEEP) == DDI_SUCCESS) {
				VMXNET3_WARN(dp, "reset scheduled\n");
			} else {
				VMXNET3_WARN(dp,
				    "ddi_taskq_dispatch() failed\n");
			}
		}
		if (events & VMXNET3_ECR_LINK) {
			vmxnet3_refresh_linkstate(dp);
			linkStateChanged = B_TRUE;
		}
		if (events & VMXNET3_ECR_DIC) {
			VMXNET3_DEBUG(dp, 1, "device implementation change\n");
		}
		VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_ECR, events);
	}

	return (linkStateChanged);
}
112462dadd65SYuri Pankov 
112562dadd65SYuri Pankov /*
1126ca5345b6SSebastien Roy  * Interrupt handler of a vmxnet3 device.
112762dadd65SYuri Pankov  *
1128ca5345b6SSebastien Roy  * Returns:
1129ca5345b6SSebastien Roy  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
113062dadd65SYuri Pankov  */
113162dadd65SYuri Pankov /* ARGSUSED1 */
113262dadd65SYuri Pankov static uint_t
vmxnet3_intr(caddr_t data1,caddr_t data2)113362dadd65SYuri Pankov vmxnet3_intr(caddr_t data1, caddr_t data2)
113462dadd65SYuri Pankov {
113562dadd65SYuri Pankov 	vmxnet3_softc_t *dp = (void *) data1;
113662dadd65SYuri Pankov 
113762dadd65SYuri Pankov 	VMXNET3_DEBUG(dp, 3, "intr()\n");
113862dadd65SYuri Pankov 
113962dadd65SYuri Pankov 	mutex_enter(&dp->intrLock);
114062dadd65SYuri Pankov 
114162dadd65SYuri Pankov 	if (dp->devEnabled) {
114262dadd65SYuri Pankov 		boolean_t linkStateChanged;
114362dadd65SYuri Pankov 		boolean_t mustUpdateTx;
114462dadd65SYuri Pankov 		mblk_t *mps;
114562dadd65SYuri Pankov 
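		/*
		 * With a fixed (shared INTx) interrupt, reading ICR tells us
		 * whether this device actually raised the interrupt; if not,
		 * leave it unclaimed so other handlers on the line can run.
		 */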
		if (dp->intrType == DDI_INTR_TYPE_FIXED &&
		    !VMXNET3_BAR1_GET32(dp, VMXNET3_REG_ICR)) {
			goto intr_unclaimed;
		}

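		/*
		 * In active interrupt-masking mode the device does not mask
		 * its interrupt for us, so mask it here (IMR = 1) and unmask
		 * it again once event, tx and rx processing is done.
		 */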
		if (dp->intrMaskMode == VMXNET3_IMM_ACTIVE) {
			VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 1);
		}

		linkStateChanged = vmxnet3_intr_events(dp);
		mustUpdateTx = vmxnet3_tx_complete(dp, &dp->txQueue);
		mps = vmxnet3_rx_intr(dp, &dp->rxQueue);

		mutex_exit(&dp->intrLock);
		VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 0);

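		/*
		 * Notify MAC only after intrLock has been dropped; these
		 * upcalls may call back into the driver and must not be made
		 * with the interrupt lock held.
		 */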
		if (linkStateChanged) {
			mac_link_update(dp->mac, dp->linkState);
		}
		if (mustUpdateTx) {
			mac_tx_update(dp->mac);
		}
		if (mps) {
			mac_rx(dp->mac, NULL, mps);
		}

		return (DDI_INTR_CLAIMED);
	}

intr_unclaimed:
	mutex_exit(&dp->intrLock);
	return (DDI_INTR_UNCLAIMED);
}

static int
vmxnet3_kstat_update(kstat_t *ksp, int rw)
{
	vmxnet3_softc_t *dp = ksp->ks_private;
	vmxnet3_kstats_t *statp = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

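	/* Snapshot the driver's soft-state counters into the named kstats. */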
	statp->reset_count.value.ul = dp->reset_count;
	statp->tx_pullup_needed.value.ul = dp->tx_pullup_needed;
	statp->tx_ring_full.value.ul = dp->tx_ring_full;
	statp->rx_alloc_buf.value.ul = dp->rx_alloc_buf;
	statp->rx_pool_empty.value.ul = dp->rx_pool_empty;
	statp->rx_num_bufs.value.ul = dp->rx_num_bufs;

	return (0);
}

static int
vmxnet3_kstat_init(vmxnet3_softc_t *dp)
{
	vmxnet3_kstats_t *statp;

	dp->devKstats = kstat_create(VMXNET3_MODNAME, dp->instance,
	    "statistics", "dev",  KSTAT_TYPE_NAMED,
	    sizeof (vmxnet3_kstats_t) / sizeof (kstat_named_t), 0);
	if (dp->devKstats == NULL)
		return (DDI_FAILURE);

	dp->devKstats->ks_update = vmxnet3_kstat_update;
	dp->devKstats->ks_private = dp;

	statp = dp->devKstats->ks_data;

	kstat_named_init(&statp->reset_count, "reset_count", KSTAT_DATA_ULONG);
	kstat_named_init(&statp->tx_pullup_needed, "tx_pullup_needed",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&statp->tx_ring_full, "tx_ring_full",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&statp->rx_alloc_buf, "rx_alloc_buf",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&statp->rx_pool_empty, "rx_pool_empty",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&statp->rx_num_bufs, "rx_num_bufs",
	    KSTAT_DATA_ULONG);

	kstat_install(dp->devKstats);

	return (DDI_SUCCESS);
}

/*
 * Probe and attach a vmxnet3 instance to the stack.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	vmxnet3_softc_t *dp;
	mac_register_t *macr;
	uint16_t vendorId, devId, ret16;
	uint32_t ret32;
	int ret, err;
	uint_t uret;

	if (cmd != DDI_ATTACH) {
		goto error;
	}

	/*
	 * Allocate the soft state
	 */
	dp = kmem_zalloc(sizeof (vmxnet3_softc_t), KM_SLEEP);
	ASSERT(dp);

	dp->dip = dip;
	dp->instance = ddi_get_instance(dip);
	dp->cur_mtu = ETHERMTU;
	dp->allow_jumbo = B_TRUE;
	dp->alloc_ok = VMXNET3_ALLOC_OK(dp);

	VMXNET3_DEBUG(dp, 1, "attach()\n");

	ddi_set_driver_private(dip, dp);

	/*
	 * Get access to the PCI bus configuration space
	 */
	if (pci_config_setup(dip, &dp->pciHandle) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "pci_config_setup() failed\n");
		goto error_soft_state;
	}

	/*
	 * Make sure the chip is a vmxnet3 device
	 */
	vendorId = pci_config_get16(dp->pciHandle, PCI_CONF_VENID);
	devId = pci_config_get16(dp->pciHandle, PCI_CONF_DEVID);
	if (vendorId != PCI_VENDOR_ID_VMWARE ||
	    devId != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		VMXNET3_WARN(dp, "wrong PCI venid/devid (0x%x, 0x%x)\n",
		    vendorId, devId);
		goto error_pci_config;
	}

	/*
	 * Make sure we can access the registers through the I/O space
	 */
	ret16 = pci_config_get16(dp->pciHandle, PCI_CONF_COMM);
	ret16 |= PCI_COMM_IO | PCI_COMM_ME;
	pci_config_put16(dp->pciHandle, PCI_CONF_COMM, ret16);

	/*
	 * Map the I/O space in memory
	 */
	if (ddi_regs_map_setup(dip, 1, &dp->bar0, 0, 0, &vmxnet3_dev_attr,
	    &dp->bar0Handle) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_regs_map_setup() for BAR0 failed\n");
		goto error_pci_config;
	}

	if (ddi_regs_map_setup(dip, 2, &dp->bar1, 0, 0, &vmxnet3_dev_attr,
	    &dp->bar1Handle) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_regs_map_setup() for BAR1 failed\n");
		goto error_regs_map_0;
	}

	/*
	 * Check the version number of the virtual device
	 */
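	/*
	 * A set bit 0 indicates that the device supports revision 1 of the
	 * vmxnet3 (VRRS) and UPT (UVRS) interfaces; the driver selects that
	 * revision by writing 1 back.
	 */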
	if (VMXNET3_BAR1_GET32(dp, VMXNET3_REG_VRRS) & 1) {
		VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_VRRS, 1);
	} else {
		VMXNET3_WARN(dp, "incompatible h/w version\n");
		goto error_regs_map_1;
	}

	if (VMXNET3_BAR1_GET32(dp, VMXNET3_REG_UVRS) & 1) {
		VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_UVRS, 1);
	} else {
		VMXNET3_WARN(dp, "incompatible upt version\n");
		goto error_regs_map_1;
	}

	if (vmxnet3_kstat_init(dp) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "unable to initialize kstats");
		goto error_regs_map_1;
	}

	/*
	 * Read the MAC address from the device
	 */
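	/* MACL holds bytes 0-3 of the address, MACH holds bytes 4-5. */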
	ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_MACL);
	*((uint32_t *)(dp->macaddr + 0)) = ret32;
	ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_MACH);
	*((uint16_t *)(dp->macaddr + 4)) = ret32;

	/*
	 * Register with the MAC framework
	 */
	if (!(macr = mac_alloc(MAC_VERSION))) {
		VMXNET3_WARN(dp, "mac_alloc() failed\n");
		goto error_kstat;
	}

	macr->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macr->m_driver = dp;
	macr->m_dip = dip;
	macr->m_instance = 0;
	macr->m_src_addr = dp->macaddr;
	macr->m_dst_addr = NULL;
	macr->m_callbacks = &vmxnet3_mac_callbacks;
	macr->m_min_sdu = 0;
	macr->m_max_sdu = ETHERMTU;
	macr->m_margin = VLAN_TAGSZ;
	macr->m_pdata = NULL;
	macr->m_pdata_size = 0;

	ret = mac_register(macr, &dp->mac);
	mac_free(macr);
	if (ret != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "mac_register() failed\n");
		goto error_kstat;
	}

	/*
	 * Register the interrupt(s) in this order of preference:
	 * MSI-X, MSI, INTx
	 */
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR);
	ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
	switch (ret32 & 0x3) {
	case VMXNET3_IT_AUTO:
	case VMXNET3_IT_MSIX:
		dp->intrType = DDI_INTR_TYPE_MSIX;
		err = ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1,
		    &ret, DDI_INTR_ALLOC_STRICT);
		if (err == DDI_SUCCESS)
			break;
		VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_MSIX failed, err:%d\n",
		    err);
		/* FALLTHROUGH */
	case VMXNET3_IT_MSI:
		dp->intrType = DDI_INTR_TYPE_MSI;
		if (ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1,
		    &ret, DDI_INTR_ALLOC_STRICT) == DDI_SUCCESS)
			break;
		VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_MSI failed\n");
		/* FALLTHROUGH */
	case VMXNET3_IT_INTX:
		dp->intrType = DDI_INTR_TYPE_FIXED;
		if (ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1,
		    &ret, DDI_INTR_ALLOC_STRICT) == DDI_SUCCESS) {
			break;
		}
		VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_INTX failed\n");
		/* FALLTHROUGH */
	default:
		VMXNET3_WARN(dp, "ddi_intr_alloc() failed\n");
		goto error_mac;
	}
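	/*
	 * Bits 2-3 of the same GET_CONF_INTR response carry the interrupt
	 * mask mode the device expects the driver to use.
	 */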
	dp->intrMaskMode = (ret32 >> 2) & 0x3;
	if (dp->intrMaskMode == VMXNET3_IMM_LAZY) {
		VMXNET3_WARN(dp, "Lazy masking is not supported\n");
		goto error_intr;
	}

	if (ddi_intr_get_pri(dp->intrHandle, &uret) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_intr_get_pri() failed\n");
		goto error_intr;
	}

	VMXNET3_DEBUG(dp, 2, "intrType=0x%x, intrMaskMode=0x%x, intrPrio=%u\n",
	    dp->intrType, dp->intrMaskMode, uret);

	/*
	 * Create a task queue to reset the device if it wedges.
	 */
	dp->resetTask = ddi_taskq_create(dip, "vmxnet3_reset_task", 1,
	    TASKQ_DEFAULTPRI, 0);
	if (!dp->resetTask) {
		VMXNET3_WARN(dp, "ddi_taskq_create() failed\n");
		goto error_intr;
	}

	/*
	 * Initialize our mutexes now that we know the interrupt priority.
	 * This _must_ be done before ddi_intr_enable().
	 */
	mutex_init(&dp->intrLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret));
	mutex_init(&dp->txLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret));
	mutex_init(&dp->rxPoolLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret));

	if (ddi_intr_add_handler(dp->intrHandle, vmxnet3_intr,
	    dp, NULL) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_intr_add_handler() failed\n");
		goto error_mutexes;
	}

	err = ddi_intr_get_cap(dp->intrHandle, &dp->intrCap);
	if (err != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_intr_get_cap() failed %d", err);
		goto error_intr_handler;
	}

	if (dp->intrCap & DDI_INTR_FLAG_BLOCK) {
		err = ddi_intr_block_enable(&dp->intrHandle, 1);
		if (err != DDI_SUCCESS) {
			VMXNET3_WARN(dp, "ddi_intr_block_enable() failed, "
			    "err:%d\n", err);
			goto error_intr_handler;
		}
	} else {
		err = ddi_intr_enable(dp->intrHandle);
		if (err != DDI_SUCCESS) {
			VMXNET3_WARN(dp, "ddi_intr_enable() failed, err:%d\n",
			    err);
			goto error_intr_handler;
		}
	}

	return (DDI_SUCCESS);

error_intr_handler:
	(void) ddi_intr_remove_handler(dp->intrHandle);
error_mutexes:
	mutex_destroy(&dp->rxPoolLock);
	mutex_destroy(&dp->txLock);
	mutex_destroy(&dp->intrLock);
	ddi_taskq_destroy(dp->resetTask);
error_intr:
	(void) ddi_intr_free(dp->intrHandle);
error_mac:
	(void) mac_unregister(dp->mac);
error_kstat:
	kstat_delete(dp->devKstats);
error_regs_map_1:
	ddi_regs_map_free(&dp->bar1Handle);
error_regs_map_0:
	ddi_regs_map_free(&dp->bar0Handle);
error_pci_config:
	pci_config_teardown(&dp->pciHandle);
error_soft_state:
	kmem_free(dp, sizeof (vmxnet3_softc_t));
error:
	return (DDI_FAILURE);
}

/*
 * Detach a vmxnet3 instance from the stack.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	vmxnet3_softc_t *dp = ddi_get_driver_private(dip);
	unsigned int retries = 0;
	int ret;

	VMXNET3_DEBUG(dp, 1, "detach()\n");

	if (cmd != DDI_DETACH) {
		return (DDI_FAILURE);
	}

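	/*
	 * Wait for outstanding receive buffers (rx_num_bufs) to drain before
	 * tearing the driver down; give up after roughly 10 seconds.
	 */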
	while (dp->rx_num_bufs > 0) {
		if (retries++ < 10) {
			VMXNET3_WARN(dp, "rx pending (%u), waiting 1 second\n",
			    dp->rx_num_bufs);
			delay(drv_usectohz(1000000));
		} else {
			VMXNET3_WARN(dp, "giving up\n");
			return (DDI_FAILURE);
		}
	}

	if (dp->intrCap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_disable(&dp->intrHandle, 1);
	} else {
		ret = ddi_intr_disable(dp->intrHandle);
	}
	if (ret != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "unable to disable interrupts");
		return (DDI_FAILURE);
	}
	if (ddi_intr_remove_handler(dp->intrHandle) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "unable to remove interrupt handler");
		return (DDI_FAILURE);
	}
	(void) ddi_intr_free(dp->intrHandle);

	VERIFY(mac_unregister(dp->mac) == 0);

	kstat_delete(dp->devKstats);

	if (dp->mfTable.buf) {
		vmxnet3_free_dma_mem(&dp->mfTable);
	}

	mutex_destroy(&dp->rxPoolLock);
	mutex_destroy(&dp->txLock);
	mutex_destroy(&dp->intrLock);
	ddi_taskq_destroy(dp->resetTask);

	ddi_regs_map_free(&dp->bar1Handle);
	ddi_regs_map_free(&dp->bar0Handle);
	pci_config_teardown(&dp->pciHandle);

	kmem_free(dp, sizeof (vmxnet3_softc_t));

	return (DDI_SUCCESS);
}

/*
 * Structures used by the module loader
 */

#define	VMXNET3_IDENT "VMware Ethernet v3 " VMXNET3_DRIVER_VERSION_STRING

DDI_DEFINE_STREAM_OPS(
	vmxnet3_dev_ops,
	nulldev,
	nulldev,
	vmxnet3_attach,
	vmxnet3_detach,
	nodev,
	NULL,
	D_NEW | D_MP,
	NULL,
	ddi_quiesce_not_supported);

static struct modldrv vmxnet3_modldrv = {
	&mod_driverops,		/* drv_modops */
	VMXNET3_IDENT,		/* drv_linkinfo */
	&vmxnet3_dev_ops	/* drv_dev_ops */
};

static struct modlinkage vmxnet3_modlinkage = {
	MODREV_1,			/* ml_rev */
	{ &vmxnet3_modldrv, NULL }	/* ml_linkage */
};

/* Module load entry point */
int
_init(void)
{
	int ret;

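	/*
	 * mac_init_ops() hooks the MAC framework into vmxnet3_dev_ops and
	 * must be undone with mac_fini_ops() if mod_install() fails.
	 */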
	mac_init_ops(&vmxnet3_dev_ops, VMXNET3_MODNAME);
	ret = mod_install(&vmxnet3_modlinkage);
	if (ret != DDI_SUCCESS) {
		mac_fini_ops(&vmxnet3_dev_ops);
	}

	return (ret);
}

/* Module unload entry point */
int
_fini(void)
{
	int ret;

	ret = mod_remove(&vmxnet3_modlinkage);
	if (ret == DDI_SUCCESS) {
		mac_fini_ops(&vmxnet3_dev_ops);
	}

	return (ret);
}

/* Module info entry point */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&vmxnet3_modlinkage, modinfop));
}