/*
 * Copyright (C) 2007 VMware, Inc. All rights reserved.
 *
 * The contents of this file are subject to the terms of the Common
 * Development and Distribution License (the "License") version 1.0
 * and no later version.  You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at
 *         http://www.opensource.org/licenses/cddl1.php
 *
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */
2062dadd65SYuri Pankov #include <vmxnet3.h>
2162dadd65SYuri Pankov 
/*
 * Outcome of an attempt to map one packet into the Tx command ring
 * (see vmxnet3_tx_one()).
 */
typedef enum vmxnet3_txstatus {
	VMXNET3_TX_OK,		/* packet fully mapped into the ring */
	VMXNET3_TX_FAILURE,	/* fatal error (DMA bind or offload); drop */
	VMXNET3_TX_PULLUP,	/* too many fragments; msgpullup() and retry */
	VMXNET3_TX_RINGFULL	/* ring (nearly) full; requeue and retry later */
} vmxnet3_txstatus;
2862dadd65SYuri Pankov 
/*
 * Hardware offload parameters for one packet, computed by
 * vmxnet3_tx_prepare_offload() and written into the SOP Tx descriptor
 * by vmxnet3_tx_one().
 */
typedef struct vmxnet3_offload_t {
	uint16_t om;		/* offload mode (VMXNET3_OM_NONE/CSUM/TSO) */
	uint16_t hlen;		/* header length covered by the offload */
	uint16_t msscof;	/* MSS (TSO) or checksum stuff offset (CSUM) */
} vmxnet3_offload_t;
3462dadd65SYuri Pankov 
3562dadd65SYuri Pankov /*
36ca5345b6SSebastien Roy  * Initialize a TxQueue. Currently nothing needs to be done.
3762dadd65SYuri Pankov  */
3862dadd65SYuri Pankov /* ARGSUSED */
3962dadd65SYuri Pankov int
4062dadd65SYuri Pankov vmxnet3_txqueue_init(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq)
4162dadd65SYuri Pankov {
426849994eSSebastien Roy 	return (0);
4362dadd65SYuri Pankov }
4462dadd65SYuri Pankov 
4562dadd65SYuri Pankov /*
46ca5345b6SSebastien Roy  * Finish a TxQueue by freeing all pending Tx.
4762dadd65SYuri Pankov  */
4862dadd65SYuri Pankov void
4962dadd65SYuri Pankov vmxnet3_txqueue_fini(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq)
5062dadd65SYuri Pankov {
5162dadd65SYuri Pankov 	unsigned int i;
5262dadd65SYuri Pankov 
5362dadd65SYuri Pankov 	ASSERT(!dp->devEnabled);
5462dadd65SYuri Pankov 
5562dadd65SYuri Pankov 	for (i = 0; i < txq->cmdRing.size; i++) {
5662dadd65SYuri Pankov 		mblk_t *mp = txq->metaRing[i].mp;
5762dadd65SYuri Pankov 		if (mp) {
5862dadd65SYuri Pankov 			freemsg(mp);
5962dadd65SYuri Pankov 		}
6062dadd65SYuri Pankov 	}
6162dadd65SYuri Pankov }
6262dadd65SYuri Pankov 
/*
 * Build the offload context of a msg.
 *
 * Retrieves the checksum and LSO metadata the stack attached to mp and
 * translates it into the vmxnet3 descriptor fields (om/hlen/msscof).
 *
 * Returns:
 *	0 if everything went well.
 *	+n if n bytes need to be pulled up.
 *	-1 in case of error (not used).
 */
static int
vmxnet3_tx_prepare_offload(vmxnet3_softc_t *dp, vmxnet3_offload_t *ol,
    mblk_t *mp)
{
	int ret = 0;
	uint32_t start, stuff, value, flags, lso_flag, mss;

	/* Default: no hardware offload for this packet. */
	ol->om = VMXNET3_OM_NONE;
	ol->hlen = 0;
	ol->msscof = 0;

	/* Checksum offload metadata attached by the stack. */
	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, NULL, &value, &flags);

	/* LSO (TSO) metadata: segment size and flag. */
	mac_lso_get(mp, &mss, &lso_flag);

	if (flags || lso_flag) {
		struct ether_vlan_header *eth = (void *) mp->b_rptr;
		uint8_t ethLen;

		/*
		 * MAC header length depends on whether the frame carries
		 * an 802.1Q VLAN tag.
		 */
		if (eth->ether_tpid == htons(ETHERTYPE_VLAN)) {
			ethLen = sizeof (struct ether_vlan_header);
		} else {
			ethLen = sizeof (struct ether_header);
		}

		VMXNET3_DEBUG(dp, 4, "flags=0x%x, ethLen=%u, start=%u, "
		    "stuff=%u, value=%u\n", flags, ethLen, start, stuff, value);

		if (lso_flag & HW_LSO) {
			mblk_t *mblk = mp;
			uint8_t *ip, *tcp;
			uint8_t ipLen, tcpLen;

			/*
			 * Copy e1000g's behavior:
			 * - Do not assume all the headers are in the same mblk.
			 * - Assume each header is always within one mblk.
			 * - Assume the ethernet header is in the first mblk.
			 */
			/* Locate the IP header, possibly in the next mblk. */
			ip = mblk->b_rptr + ethLen;
			if (ip >= mblk->b_wptr) {
				mblk = mblk->b_cont;
				ip = mblk->b_rptr;
			}
			ipLen = IPH_HDR_LENGTH((ipha_t *)ip);
			/* Locate the TCP header the same way. */
			tcp = ip + ipLen;
			if (tcp >= mblk->b_wptr) {
				mblk = mblk->b_cont;
				tcp = mblk->b_rptr;
			}
			tcpLen = TCP_HDR_LENGTH((tcph_t *)tcp);
			/* Careful, '>' instead of '>=' here */
			if (tcp + tcpLen > mblk->b_wptr) {
				mblk = mblk->b_cont;
			}

			ol->om = VMXNET3_OM_TSO;
			ol->hlen = ethLen + ipLen + tcpLen;
			ol->msscof = mss;

			/*
			 * If any header spilled past the first mblk, ask the
			 * caller to pull up hlen bytes so all headers end up
			 * contiguous.
			 */
			if (mblk != mp) {
				ret = ol->hlen;
			}
		} else if (flags & HCK_PARTIALCKSUM) {
			ol->om = VMXNET3_OM_CSUM;
			/*
			 * start/stuff exclude the MAC header; add ethLen to
			 * make them frame-relative for the device.
			 */
			ol->hlen = start + ethLen;
			ol->msscof = stuff + ethLen;
		}
	}

	return (ret);
}
14362dadd65SYuri Pankov 
/*
 * Map a msg into the Tx command ring of a vmxnet3 device.
 *
 * Each non-empty mblk is DMA-bound and carved into descriptors of at
 * most VMXNET3_MAX_TX_BUF_SIZE bytes.  Descriptors are published with
 * an inverted generation bit; only after a memory barrier is the SOP
 * descriptor's gen bit flipped, which hands the whole chain to the
 * device atomically.
 *
 * Returns:
 *	VMXNET3_TX_OK if everything went well.
 *	VMXNET3_TX_RINGFULL if the ring is nearly full.
 *	VMXNET3_TX_PULLUP if the msg is overfragmented.
 *	VMXNET3_TX_FAILURE if there was a DMA or offload error.
 *
 * Side effects:
 *	The ring is filled if VMXNET3_TX_OK is returned.
 */
static vmxnet3_txstatus
vmxnet3_tx_one(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq,
    vmxnet3_offload_t *ol, mblk_t *mp)
{
	int ret = VMXNET3_TX_OK;
	unsigned int frags = 0, totLen = 0;
	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
	Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
	Vmxnet3_GenericDesc *txDesc;
	uint16_t sopIdx, eopIdx;
	uint8_t sopGen, curGen;
	mblk_t *mblk;

	mutex_enter(&dp->txLock);

	sopIdx = eopIdx = cmdRing->next2fill;
	sopGen = cmdRing->gen;
	/*
	 * Write all descriptors with the inverted gen bit so the device
	 * ignores them until the SOP gen bit is set at the very end.
	 */
	curGen = !cmdRing->gen;

	for (mblk = mp; mblk != NULL; mblk = mblk->b_cont) {
		unsigned int len = MBLKL(mblk);
		ddi_dma_cookie_t cookie;
		uint_t cookieCount;

		/* Skip empty mblks; account the bytes of the others. */
		if (len) {
			totLen += len;
		} else {
			continue;
		}

		if (ddi_dma_addr_bind_handle(dp->txDmaHandle, NULL,
		    (caddr_t)mblk->b_rptr, len,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
		    &cookie, &cookieCount) != DDI_DMA_MAPPED) {
			VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed\n");
			ret = VMXNET3_TX_FAILURE;
			goto error;
		}

		ASSERT(cookieCount);

		/* Outer loop: one pass per DMA cookie of this mblk. */
		do {
			uint64_t addr = cookie.dmac_laddress;
			size_t len = cookie.dmac_size;

			/*
			 * Inner loop: split the cookie into descriptor-sized
			 * chunks.
			 */
			do {
				uint32_t dw2, dw3;
				size_t chunkLen;

				ASSERT(!txq->metaRing[eopIdx].mp);
				ASSERT(cmdRing->avail - frags);

				/*
				 * Give up if the packet needs more
				 * descriptors than a non-TSO packet may use,
				 * or more than the ring can ever hold; the
				 * caller will flatten the message and retry.
				 */
				if (frags >= cmdRing->size - 1 ||
				    (ol->om != VMXNET3_OM_TSO &&
				    frags >= VMXNET3_MAX_TXD_PER_PKT)) {
					VMXNET3_DEBUG(dp, 2,
					    "overfragmented mp (%u)\n", frags);
					(void) ddi_dma_unbind_handle(
					    dp->txDmaHandle);
					ret = VMXNET3_TX_PULLUP;
					goto error;
				}
				/*
				 * Keep at least one slot free; flag that Tx
				 * must be rescheduled once space opens up.
				 */
				if (cmdRing->avail - frags <= 1) {
					dp->txMustResched = B_TRUE;
					(void) ddi_dma_unbind_handle(
					    dp->txDmaHandle);
					ret = VMXNET3_TX_RINGFULL;
					goto error;
				}

				if (len > VMXNET3_MAX_TX_BUF_SIZE) {
					chunkLen = VMXNET3_MAX_TX_BUF_SIZE;
				} else {
					chunkLen = len;
				}

				frags++;
				eopIdx = cmdRing->next2fill;

				txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
				ASSERT(txDesc->txd.gen != cmdRing->gen);

				/* txd.addr */
				txDesc->txd.addr = addr;
				/* txd.dw2: a len field of 0 encodes the max */
				dw2 = chunkLen == VMXNET3_MAX_TX_BUF_SIZE ?
				    0 : chunkLen;
				dw2 |= curGen << VMXNET3_TXD_GEN_SHIFT;
				txDesc->dword[2] = dw2;
				ASSERT(txDesc->txd.len == len ||
				    txDesc->txd.len == 0);
				/* txd.dw3 */
				dw3 = 0;
				txDesc->dword[3] = dw3;

				VMXNET3_INC_RING_IDX(cmdRing,
				    cmdRing->next2fill);
				/*
				 * After the first descriptor, use the ring's
				 * real gen bit (possibly flipped by the
				 * wrap in VMXNET3_INC_RING_IDX).
				 */
				curGen = cmdRing->gen;

				addr += chunkLen;
				len -= chunkLen;
			} while (len);

			if (--cookieCount) {
				ddi_dma_nextcookie(dp->txDmaHandle, &cookie);
			}
		} while (cookieCount);

		(void) ddi_dma_unbind_handle(dp->txDmaHandle);
	}

	/* Update the EOP descriptor */
	txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
	txDesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

	/* Update the SOP descriptor. Must be done last */
	txDesc = VMXNET3_GET_DESC(cmdRing, sopIdx);
	/*
	 * For TSO the first descriptor must cover at least the full
	 * header length; otherwise fail the packet.
	 */
	if (ol->om == VMXNET3_OM_TSO && txDesc->txd.len != 0 &&
	    txDesc->txd.len < ol->hlen) {
		ret = VMXNET3_TX_FAILURE;
		goto error;
	}
	txDesc->txd.om = ol->om;
	txDesc->txd.hlen = ol->hlen;
	txDesc->txd.msscof = ol->msscof;
	/*
	 * Make all prior descriptor stores visible before publishing the
	 * SOP gen bit, which is what the device polls on.
	 */
	membar_producer();
	txDesc->txd.gen = sopGen;

	/* Update the meta ring & metadata */
	txq->metaRing[sopIdx].mp = mp;
	txq->metaRing[eopIdx].sopIdx = sopIdx;
	txq->metaRing[eopIdx].frags = frags;
	cmdRing->avail -= frags;
	if (ol->om == VMXNET3_OM_TSO) {
		/* Count one deferred Tx per segment the device will emit. */
		txqCtrl->txNumDeferred +=
		    (totLen - ol->hlen + ol->msscof - 1) / ol->msscof;
	} else {
		txqCtrl->txNumDeferred++;
	}

	VMXNET3_DEBUG(dp, 3, "tx 0x%p on [%u;%u]\n", (void *)mp, sopIdx,
	    eopIdx);

	goto done;

error:
	/*
	 * Reverse the generation bits of every descriptor written so far
	 * and rewind next2fill, leaving the ring as if nothing happened.
	 */
	while (sopIdx != cmdRing->next2fill) {
		VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill);
		txDesc = VMXNET3_GET_DESC(cmdRing, cmdRing->next2fill);
		txDesc->txd.gen = !cmdRing->gen;
	}

done:
	mutex_exit(&dp->txLock);

	return (ret);
}
31462dadd65SYuri Pankov 
/*
 * Send packets on a vmxnet3 device (GLDv3 mc_tx entry point).
 *
 * Walks the b_next chain of messages, preparing offload state and
 * mapping each one into the Tx ring, flattening over-fragmented
 * messages with msgpullup() as needed.
 *
 * Returns:
 *	NULL in case of success or failure.
 *	The mps to be retransmitted later if the ring is full.
 */
mblk_t *
vmxnet3_tx(void *data, mblk_t *mps)
{
	vmxnet3_softc_t *dp = data;
	vmxnet3_txqueue_t *txq = &dp->txQueue;
	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
	Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
	vmxnet3_txstatus status = VMXNET3_TX_OK;
	mblk_t *mp;

	ASSERT(mps != NULL);

	do {
		vmxnet3_offload_t ol;
		int pullup;

		/* Detach the head message from the chain. */
		mp = mps;
		mps = mp->b_next;
		mp->b_next = NULL;

		if (DB_TYPE(mp) != M_DATA) {
			/*
			 * PR #315560: M_PROTO mblks could be passed for
			 * some reason. Drop them because we don't understand
			 * them and because their contents are not Ethernet
			 * frames anyway.
			 */
			ASSERT(B_FALSE);
			freemsg(mp);
			continue;
		}

		/*
		 * Prepare the offload while we're still handling the original
		 * message -- msgpullup() discards the metadata afterwards.
		 */
		pullup = vmxnet3_tx_prepare_offload(dp, &ol, mp);
		if (pullup) {
			/*
			 * The headers span mblks; pull up the first 'pullup'
			 * bytes so they land in a single contiguous mblk.
			 */
			mblk_t *new_mp = msgpullup(mp, pullup);
			atomic_inc_32(&dp->tx_pullup_needed);
			freemsg(mp);
			if (new_mp) {
				mp = new_mp;
			} else {
				/* Allocation failed; drop this packet. */
				atomic_inc_32(&dp->tx_pullup_failed);
				continue;
			}
		}

		/*
		 * Try to map the message in the Tx ring.
		 * This call might fail for non-fatal reasons.
		 */
		status = vmxnet3_tx_one(dp, txq, &ol, mp);
		if (status == VMXNET3_TX_PULLUP) {
			/*
			 * Try one more time after flattening
			 * the message with msgpullup().
			 */
			if (mp->b_cont != NULL) {
				mblk_t *new_mp = msgpullup(mp, -1);
				atomic_inc_32(&dp->tx_pullup_needed);
				freemsg(mp);
				if (new_mp) {
					mp = new_mp;
					status = vmxnet3_tx_one(dp, txq, &ol,
					    mp);
				} else {
					atomic_inc_32(&dp->tx_pullup_failed);
					continue;
				}
			}
		}
		if (status != VMXNET3_TX_OK && status != VMXNET3_TX_RINGFULL) {
			/* Fatal failure, drop it */
			atomic_inc_32(&dp->tx_error);
			freemsg(mp);
		}
	} while (mps && status != VMXNET3_TX_RINGFULL);

	if (status == VMXNET3_TX_RINGFULL) {
		/*
		 * Put the unmapped message back at the head of the chain
		 * and hand the remainder back to the caller for a retry.
		 */
		atomic_inc_32(&dp->tx_ring_full);
		mp->b_next = mps;
		mps = mp;
	} else {
		ASSERT(!mps);
	}

	/* Notify the device */
	mutex_enter(&dp->txLock);
	if (txqCtrl->txNumDeferred >= txqCtrl->txThreshold) {
		txqCtrl->txNumDeferred = 0;
		VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_TXPROD, cmdRing->next2fill);
	}
	mutex_exit(&dp->txLock);

	return (mps);
}
42062dadd65SYuri Pankov 
/*
 * Parse a transmit queue and complete packets.
 *
 * Walks the completion ring while its descriptors carry the current
 * generation bit, reclaims the command ring slots used by each
 * completed packet and frees the associated message.
 *
 * Returns:
 *	B_TRUE if Tx must be updated or B_FALSE if no action is required.
 */
boolean_t
vmxnet3_tx_complete(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq)
{
	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
	vmxnet3_compring_t *compRing = &txq->compRing;
	Vmxnet3_GenericDesc *compDesc;
	boolean_t completedTx = B_FALSE;
	boolean_t ret = B_FALSE;

	mutex_enter(&dp->txLock);

	compDesc = VMXNET3_GET_DESC(compRing, compRing->next2comp);
	/* A matching gen bit means the device has published this entry. */
	while (compDesc->tcd.gen == compRing->gen) {
		vmxnet3_metatx_t *sopMetaDesc, *eopMetaDesc;
		uint16_t sopIdx, eopIdx;
		mblk_t *mp;

		/*
		 * The completion names the EOP descriptor; the meta ring
		 * links it back to the SOP entry holding the mblk.
		 */
		eopIdx = compDesc->tcd.txdIdx;
		eopMetaDesc = &txq->metaRing[eopIdx];
		sopIdx = eopMetaDesc->sopIdx;
		sopMetaDesc = &txq->metaRing[sopIdx];

		/* Return the packet's descriptors to the command ring. */
		ASSERT(eopMetaDesc->frags);
		cmdRing->avail += eopMetaDesc->frags;

		ASSERT(sopMetaDesc->mp);
		mp = sopMetaDesc->mp;
		freemsg(mp);

		/* Clear the meta entries for reuse. */
		eopMetaDesc->sopIdx = 0;
		eopMetaDesc->frags = 0;
		sopMetaDesc->mp = NULL;

		completedTx = B_TRUE;

		VMXNET3_DEBUG(dp, 3, "cp 0x%p on [%u;%u]\n", (void *)mp, sopIdx,
		    eopIdx);

		VMXNET3_INC_RING_IDX(compRing, compRing->next2comp);
		compDesc = VMXNET3_GET_DESC(compRing, compRing->next2comp);
	}

	/*
	 * If Tx had stalled on a full ring and we just freed space,
	 * tell the caller to resume transmission (mac_tx_update()).
	 */
	if (dp->txMustResched && completedTx) {
		dp->txMustResched = B_FALSE;
		ret = B_TRUE;
	}

	mutex_exit(&dp->txLock);

	return (ret);
}
478