162dadd65SYuri Pankov /*
262dadd65SYuri Pankov  * Copyright (C) 2007 VMware, Inc. All rights reserved.
362dadd65SYuri Pankov  *
462dadd65SYuri Pankov  * The contents of this file are subject to the terms of the Common
562dadd65SYuri Pankov  * Development and Distribution License (the "License") version 1.0
662dadd65SYuri Pankov  * and no later version.  You may not use this file except in
762dadd65SYuri Pankov  * compliance with the License.
862dadd65SYuri Pankov  *
962dadd65SYuri Pankov  * You can obtain a copy of the License at
1062dadd65SYuri Pankov  *         http://www.opensource.org/licenses/cddl1.php
1162dadd65SYuri Pankov  *
1262dadd65SYuri Pankov  * See the License for the specific language governing permissions
1362dadd65SYuri Pankov  * and limitations under the License.
1462dadd65SYuri Pankov  */
1562dadd65SYuri Pankov 
1662dadd65SYuri Pankov /*
176849994eSSebastien Roy  * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
18*ec71f88eSPatrick Mooney  * Copyright 2018 Joyent, Inc.
1962dadd65SYuri Pankov  */
2062dadd65SYuri Pankov 
2162dadd65SYuri Pankov #include <vmxnet3.h>
2262dadd65SYuri Pankov 
2362dadd65SYuri Pankov typedef enum vmxnet3_txstatus {
2462dadd65SYuri Pankov 	VMXNET3_TX_OK,
2562dadd65SYuri Pankov 	VMXNET3_TX_FAILURE,
2662dadd65SYuri Pankov 	VMXNET3_TX_PULLUP,
2762dadd65SYuri Pankov 	VMXNET3_TX_RINGFULL
2862dadd65SYuri Pankov } vmxnet3_txstatus;
2962dadd65SYuri Pankov 
3062dadd65SYuri Pankov typedef struct vmxnet3_offload_t {
3162dadd65SYuri Pankov 	uint16_t om;
3262dadd65SYuri Pankov 	uint16_t hlen;
3362dadd65SYuri Pankov 	uint16_t msscof;
3462dadd65SYuri Pankov } vmxnet3_offload_t;
3562dadd65SYuri Pankov 
3662dadd65SYuri Pankov /*
37ca5345b6SSebastien Roy  * Initialize a TxQueue. Currently nothing needs to be done.
3862dadd65SYuri Pankov  */
/* ARGSUSED */
int
vmxnet3_txqueue_init(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq)
{
	/* No per-queue initialization is required at present; always succeeds. */
	return (0);
}
4562dadd65SYuri Pankov 
4662dadd65SYuri Pankov /*
47ca5345b6SSebastien Roy  * Finish a TxQueue by freeing all pending Tx.
4862dadd65SYuri Pankov  */
4962dadd65SYuri Pankov void
vmxnet3_txqueue_fini(vmxnet3_softc_t * dp,vmxnet3_txqueue_t * txq)5062dadd65SYuri Pankov vmxnet3_txqueue_fini(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq)
5162dadd65SYuri Pankov {
5262dadd65SYuri Pankov 	unsigned int i;
5362dadd65SYuri Pankov 
5462dadd65SYuri Pankov 	ASSERT(!dp->devEnabled);
5562dadd65SYuri Pankov 
5662dadd65SYuri Pankov 	for (i = 0; i < txq->cmdRing.size; i++) {
5762dadd65SYuri Pankov 		mblk_t *mp = txq->metaRing[i].mp;
5862dadd65SYuri Pankov 		if (mp) {
5962dadd65SYuri Pankov 			freemsg(mp);
6062dadd65SYuri Pankov 		}
6162dadd65SYuri Pankov 	}
6262dadd65SYuri Pankov }
6362dadd65SYuri Pankov 
6462dadd65SYuri Pankov /*
65ca5345b6SSebastien Roy  * Build the offload context of a msg.
6662dadd65SYuri Pankov  *
67ca5345b6SSebastien Roy  * Returns:
68ca5345b6SSebastien Roy  *	0 if everything went well.
69ca5345b6SSebastien Roy  *	+n if n bytes need to be pulled up.
70ca5345b6SSebastien Roy  *	-1 in case of error (not used).
7162dadd65SYuri Pankov  */
static int
vmxnet3_tx_prepare_offload(vmxnet3_softc_t *dp, vmxnet3_offload_t *ol,
    mblk_t *mp)
{
	int ret = 0;
	uint32_t start, stuff, value, flags, lso_flag, mss;

	/* Default: no hardware offload requested for this packet. */
	ol->om = VMXNET3_OM_NONE;
	ol->hlen = 0;
	ol->msscof = 0;

	/* Fetch the checksum and LSO metadata MAC attached to the mblk. */
	mac_hcksum_get(mp, &start, &stuff, NULL, &value, &flags);

	mac_lso_get(mp, &mss, &lso_flag);

	if (flags || lso_flag) {
		struct ether_vlan_header *eth = (void *) mp->b_rptr;
		uint8_t ethLen;

		/* MAC header length depends on the optional 802.1Q tag. */
		if (eth->ether_tpid == htons(ETHERTYPE_VLAN)) {
			ethLen = sizeof (struct ether_vlan_header);
		} else {
			ethLen = sizeof (struct ether_header);
		}

		VMXNET3_DEBUG(dp, 4, "flags=0x%x, ethLen=%u, start=%u, "
		    "stuff=%u, value=%u\n", flags, ethLen, start, stuff, value);

		if (lso_flag & HW_LSO) {
			mblk_t *mblk = mp;
			uint8_t *ip, *tcp;
			uint8_t ipLen, tcpLen;

			/*
			 * Copy e1000g's behavior:
			 * - Do not assume all the headers are in the same mblk.
			 * - Assume each header is always within one mblk.
			 * - Assume the ethernet header is in the first mblk.
			 */
			ip = mblk->b_rptr + ethLen;
			if (ip >= mblk->b_wptr) {
				/* IP header starts in the next mblk. */
				mblk = mblk->b_cont;
				ip = mblk->b_rptr;
			}
			ipLen = IPH_HDR_LENGTH((ipha_t *)ip);
			tcp = ip + ipLen;
			if (tcp >= mblk->b_wptr) {
				/* TCP header starts in the next mblk. */
				mblk = mblk->b_cont;
				tcp = mblk->b_rptr;
			}
			tcpLen = TCP_HDR_LENGTH((tcph_t *)tcp);
			/*
			 * Careful, '>' instead of '>=' here: a TCP header
			 * ending exactly at b_wptr is still fully contained
			 * in this mblk.
			 */
			if (tcp + tcpLen > mblk->b_wptr) {
				mblk = mblk->b_cont;
			}

			ol->om = VMXNET3_OM_TSO;
			ol->hlen = ethLen + ipLen + tcpLen;
			ol->msscof = mss;

			/*
			 * If the headers spilled past the first mblk, the
			 * caller must msgpullup() hlen bytes so the device
			 * sees them contiguously (see the positive return
			 * convention in the function header comment).
			 */
			if (mblk != mp) {
				ret = ol->hlen;
			}
		} else if (flags & HCK_PARTIALCKSUM) {
			/*
			 * Partial checksum: MAC's start/stuff offsets are
			 * relative to the end of the MAC header, while the
			 * device wants them from the start of the frame, so
			 * add ethLen to both.
			 */
			ol->om = VMXNET3_OM_CSUM;
			ol->hlen = start + ethLen;
			ol->msscof = stuff + ethLen;
		}
	}

	return (ret);
}
14462dadd65SYuri Pankov 
14562dadd65SYuri Pankov /*
146ca5345b6SSebastien Roy  * Map a msg into the Tx command ring of a vmxnet3 device.
14762dadd65SYuri Pankov  *
148ca5345b6SSebastien Roy  * Returns:
149ca5345b6SSebastien Roy  *	VMXNET3_TX_OK if everything went well.
150ca5345b6SSebastien Roy  *	VMXNET3_TX_RINGFULL if the ring is nearly full.
151ca5345b6SSebastien Roy  *	VMXNET3_TX_PULLUP if the msg is overfragmented.
152ca5345b6SSebastien Roy  *	VMXNET3_TX_FAILURE if there was a DMA or offload error.
15362dadd65SYuri Pankov  *
15462dadd65SYuri Pankov  * Side effects:
155ca5345b6SSebastien Roy  *	The ring is filled if VMXNET3_TX_OK is returned.
15662dadd65SYuri Pankov  */
15762dadd65SYuri Pankov static vmxnet3_txstatus
vmxnet3_tx_one(vmxnet3_softc_t * dp,vmxnet3_txqueue_t * txq,vmxnet3_offload_t * ol,mblk_t * mp)15862dadd65SYuri Pankov vmxnet3_tx_one(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq,
15962dadd65SYuri Pankov     vmxnet3_offload_t *ol, mblk_t *mp)
16062dadd65SYuri Pankov {
16162dadd65SYuri Pankov 	int ret = VMXNET3_TX_OK;
16262dadd65SYuri Pankov 	unsigned int frags = 0, totLen = 0;
16362dadd65SYuri Pankov 	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
16462dadd65SYuri Pankov 	Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
16562dadd65SYuri Pankov 	Vmxnet3_GenericDesc *txDesc;
16662dadd65SYuri Pankov 	uint16_t sopIdx, eopIdx;
16762dadd65SYuri Pankov 	uint8_t sopGen, curGen;
16862dadd65SYuri Pankov 	mblk_t *mblk;
16962dadd65SYuri Pankov 
17062dadd65SYuri Pankov 	mutex_enter(&dp->txLock);
17162dadd65SYuri Pankov 
17262dadd65SYuri Pankov 	sopIdx = eopIdx = cmdRing->next2fill;
17362dadd65SYuri Pankov 	sopGen = cmdRing->gen;
17462dadd65SYuri Pankov 	curGen = !cmdRing->gen;
17562dadd65SYuri Pankov 
17662dadd65SYuri Pankov 	for (mblk = mp; mblk != NULL; mblk = mblk->b_cont) {
17762dadd65SYuri Pankov 		unsigned int len = MBLKL(mblk);
17862dadd65SYuri Pankov 		ddi_dma_cookie_t cookie;
17962dadd65SYuri Pankov 		uint_t cookieCount;
18062dadd65SYuri Pankov 
18162dadd65SYuri Pankov 		if (len) {
18262dadd65SYuri Pankov 			totLen += len;
18362dadd65SYuri Pankov 		} else {
18462dadd65SYuri Pankov 			continue;
18562dadd65SYuri Pankov 		}
18662dadd65SYuri Pankov 
18762dadd65SYuri Pankov 		if (ddi_dma_addr_bind_handle(dp->txDmaHandle, NULL,
18862dadd65SYuri Pankov 		    (caddr_t)mblk->b_rptr, len,
19062dadd65SYuri Pankov 		    &cookie, &cookieCount) != DDI_DMA_MAPPED) {
19162dadd65SYuri Pankov 			VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed\n");
19262dadd65SYuri Pankov 			ret = VMXNET3_TX_FAILURE;
19362dadd65SYuri Pankov 			goto error;
19462dadd65SYuri Pankov 		}
19562dadd65SYuri Pankov 
19662dadd65SYuri Pankov 		ASSERT(cookieCount);
19762dadd65SYuri Pankov 
19862dadd65SYuri Pankov 		do {
19962dadd65SYuri Pankov 			uint64_t addr = cookie.dmac_laddress;
20062dadd65SYuri Pankov 			size_t len = cookie.dmac_size;
20162dadd65SYuri Pankov 
20262dadd65SYuri Pankov 			do {
20362dadd65SYuri Pankov 				uint32_t dw2, dw3;
20462dadd65SYuri Pankov 				size_t chunkLen;
20562dadd65SYuri Pankov 
20662dadd65SYuri Pankov 				ASSERT(!txq->metaRing[eopIdx].mp);
20762dadd65SYuri Pankov 				ASSERT(cmdRing->avail - frags);
20862dadd65SYuri Pankov 
20962dadd65SYuri Pankov 				if (frags >= cmdRing->size - 1 ||
21062dadd65SYuri Pankov 				    (ol->om != VMXNET3_OM_TSO &&
21162dadd65SYuri Pankov 				    frags >= VMXNET3_MAX_TXD_PER_PKT)) {
21262dadd65SYuri Pankov 					VMXNET3_DEBUG(dp, 2,
21362dadd65SYuri Pankov 					    "overfragmented mp (%u)\n", frags);
21462dadd65SYuri Pankov 					(void) ddi_dma_unbind_handle(
21562dadd65SYuri Pankov 					    dp->txDmaHandle);
21662dadd65SYuri Pankov 					ret = VMXNET3_TX_PULLUP;
21762dadd65SYuri Pankov 					goto error;
21862dadd65SYuri Pankov 				}
21962dadd65SYuri Pankov 				if (cmdRing->avail - frags <= 1) {
22062dadd65SYuri Pankov 					dp->txMustResched = B_TRUE;
22162dadd65SYuri Pankov 					(void) ddi_dma_unbind_handle(
22262dadd65SYuri Pankov 					    dp->txDmaHandle);
22362dadd65SYuri Pankov 					ret = VMXNET3_TX_RINGFULL;
22462dadd65SYuri Pankov 					goto error;
22562dadd65SYuri Pankov 				}
22662dadd65SYuri Pankov 
22762dadd65SYuri Pankov 				if (len > VMXNET3_MAX_TX_BUF_SIZE) {
22862dadd65SYuri Pankov 					chunkLen = VMXNET3_MAX_TX_BUF_SIZE;
22962dadd65SYuri Pankov 				} else {
23062dadd65SYuri Pankov 					chunkLen = len;
23162dadd65SYuri Pankov 				}
23262dadd65SYuri Pankov 
23362dadd65SYuri Pankov 				frags++;
23462dadd65SYuri Pankov 				eopIdx = cmdRing->next2fill;
23562dadd65SYuri Pankov 
23662dadd65SYuri Pankov 				txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
23762dadd65SYuri Pankov 				ASSERT(txDesc->txd.gen != cmdRing->gen);
23862dadd65SYuri Pankov 
23962dadd65SYuri Pankov 				/* txd.addr */
24062dadd65SYuri Pankov 				txDesc->txd.addr = addr;
24162dadd65SYuri Pankov 				/* txd.dw2 */
24262dadd65SYuri Pankov 				dw2 = chunkLen == VMXNET3_MAX_TX_BUF_SIZE ?
24362dadd65SYuri Pankov 				    0 : chunkLen;
24462dadd65SYuri Pankov 				dw2 |= curGen << VMXNET3_TXD_GEN_SHIFT;
24562dadd65SYuri Pankov 				txDesc->dword[2] = dw2;
24662dadd65SYuri Pankov 				ASSERT(txDesc->txd.len == len ||
24762dadd65SYuri Pankov 				    txDesc->txd.len == 0);
24862dadd65SYuri Pankov 				/* txd.dw3 */
24962dadd65SYuri Pankov 				dw3 = 0;
25062dadd65SYuri Pankov 				txDesc->dword[3] = dw3;
25162dadd65SYuri Pankov 
25262dadd65SYuri Pankov 				VMXNET3_INC_RING_IDX(cmdRing,
25362dadd65SYuri Pankov 				    cmdRing->next2fill);
25462dadd65SYuri Pankov 				curGen = cmdRing->gen;
25562dadd65SYuri Pankov 
25662dadd65SYuri Pankov 				addr += chunkLen;
25762dadd65SYuri Pankov 				len -= chunkLen;
25862dadd65SYuri Pankov 			} while (len);
25962dadd65SYuri Pankov 
26062dadd65SYuri Pankov 			if (--cookieCount) {
26162dadd65SYuri Pankov 				ddi_dma_nextcookie(dp->txDmaHandle, &cookie);
26262dadd65SYuri Pankov 			}
26362dadd65SYuri Pankov 		} while (cookieCount);
26462dadd65SYuri Pankov 
26562dadd65SYuri Pankov 		(void) ddi_dma_unbind_handle(dp->txDmaHandle);
26662dadd65SYuri Pankov 	}
26762dadd65SYuri Pankov 
26862dadd65SYuri Pankov 	/* Update the EOP descriptor */
26962dadd65SYuri Pankov 	txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
27062dadd65SYuri Pankov 	txDesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
27162dadd65SYuri Pankov 
27262dadd65SYuri Pankov 	/* Update the SOP descriptor. Must be done last */
27362dadd65SYuri Pankov 	txDesc = VMXNET3_GET_DESC(cmdRing, sopIdx);
27462dadd65SYuri Pankov 	if (ol->om == VMXNET3_OM_TSO && txDesc->txd.len != 0 &&
27562dadd65SYuri Pankov 	    txDesc->txd.len < ol->hlen) {
27662dadd65SYuri Pankov 		ret = VMXNET3_TX_FAILURE;
27762dadd65SYuri Pankov 		goto error;
27862dadd65SYuri Pankov 	}
27962dadd65SYuri Pankov 	txDesc->txd.om = ol->om;
28062dadd65SYuri Pankov 	txDesc->txd.hlen = ol->hlen;
28162dadd65SYuri Pankov 	txDesc->txd.msscof = ol->msscof;
28262dadd65SYuri Pankov 	membar_producer();
28362dadd65SYuri Pankov 	txDesc->txd.gen = sopGen;
28462dadd65SYuri Pankov 
28562dadd65SYuri Pankov 	/* Update the meta ring & metadata */
28662dadd65SYuri Pankov 	txq->metaRing[sopIdx].mp = mp;
28762dadd65SYuri Pankov 	txq->metaRing[eopIdx].sopIdx = sopIdx;
28862dadd65SYuri Pankov 	txq->metaRing[eopIdx].frags = frags;
28962dadd65SYuri Pankov 	cmdRing->avail -= frags;
29062dadd65SYuri Pankov 	if (ol->om == VMXNET3_OM_TSO) {
29162dadd65SYuri Pankov 		txqCtrl->txNumDeferred +=
29262dadd65SYuri Pankov 		    (totLen - ol->hlen + ol->msscof - 1) / ol->msscof;
29362dadd65SYuri Pankov 	} else {
29462dadd65SYuri Pankov 		txqCtrl->txNumDeferred++;
29562dadd65SYuri Pankov 	}
29662dadd65SYuri Pankov 
2975ceaf02cSYuri Pankov 	VMXNET3_DEBUG(dp, 3, "tx 0x%p on [%u;%u]\n", (void *)mp, sopIdx,
2985ceaf02cSYuri Pankov 	    eopIdx);
29962dadd65SYuri Pankov 
30062dadd65SYuri Pankov 	goto done;
30162dadd65SYuri Pankov 
30262dadd65SYuri Pankov error:
30362dadd65SYuri Pankov 	/* Reverse the generation bits */
30462dadd65SYuri Pankov 	while (sopIdx != cmdRing->next2fill) {
30562dadd65SYuri Pankov 		VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill);
30662dadd65SYuri Pankov 		txDesc = VMXNET3_GET_DESC(cmdRing, cmdRing->next2fill);
30762dadd65SYuri Pankov 		txDesc->txd.gen = !cmdRing->gen;
30862dadd65SYuri Pankov 	}
30962dadd65SYuri Pankov 
31062dadd65SYuri Pankov done:
31162dadd65SYuri Pankov 	mutex_exit(&dp->txLock);
31262dadd65SYuri Pankov 
31362dadd65SYuri Pankov 	return (ret);
31462dadd65SYuri Pankov }
31562dadd65SYuri Pankov 
31662dadd65SYuri Pankov /*
317ca5345b6SSebastien Roy  * Send packets on a vmxnet3 device.
31862dadd65SYuri Pankov  *
319ca5345b6SSebastien Roy  * Returns:
320ca5345b6SSebastien Roy  *	NULL in case of success or failure.
321ca5345b6SSebastien Roy  *	The mps to be retransmitted later if the ring is full.
32262dadd65SYuri Pankov  */
mblk_t *
vmxnet3_tx(void *data, mblk_t *mps)
{
	vmxnet3_softc_t *dp = data;
	vmxnet3_txqueue_t *txq = &dp->txQueue;
	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
	Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
	vmxnet3_txstatus status = VMXNET3_TX_OK;
	mblk_t *mp;

	ASSERT(mps != NULL);

	do {
		vmxnet3_offload_t ol;
		int pullup;

		/* Detach the head message from the b_next chain. */
		mp = mps;
		mps = mp->b_next;
		mp->b_next = NULL;

		if (DB_TYPE(mp) != M_DATA) {
			/*
			 * PR #315560: M_PROTO mblks could be passed for
			 * some reason. Drop them because we don't understand
			 * them and because their contents are not Ethernet
			 * frames anyway.
			 */
			ASSERT(B_FALSE);
			freemsg(mp);
			continue;
		}

		/*
		 * Prepare the offload while we're still handling the original
		 * message -- msgpullup() discards the metadata afterwards.
		 */
		pullup = vmxnet3_tx_prepare_offload(dp, &ol, mp);
		if (pullup) {
			/*
			 * Offload headers span mblks: flatten the first
			 * 'pullup' bytes so they are contiguous. On failure
			 * the packet is counted and dropped.
			 */
			mblk_t *new_mp = msgpullup(mp, pullup);
			atomic_inc_32(&dp->tx_pullup_needed);
			freemsg(mp);
			if (new_mp) {
				mp = new_mp;
			} else {
				atomic_inc_32(&dp->tx_pullup_failed);
				continue;
			}
		}

		/*
		 * Try to map the message in the Tx ring.
		 * This call might fail for non-fatal reasons.
		 */
		status = vmxnet3_tx_one(dp, txq, &ol, mp);
		if (status == VMXNET3_TX_PULLUP) {
			/*
			 * Try one more time after flattening
			 * the message with msgpullup().
			 */
			if (mp->b_cont != NULL) {
				mblk_t *new_mp = msgpullup(mp, -1);
				atomic_inc_32(&dp->tx_pullup_needed);
				freemsg(mp);
				if (new_mp) {
					mp = new_mp;
					status = vmxnet3_tx_one(dp, txq, &ol,
					    mp);
				} else {
					atomic_inc_32(&dp->tx_pullup_failed);
					continue;
				}
			}
		}
		if (status != VMXNET3_TX_OK && status != VMXNET3_TX_RINGFULL) {
			/* Fatal failure, drop it */
			atomic_inc_32(&dp->tx_error);
			freemsg(mp);
		}
	} while (mps && status != VMXNET3_TX_RINGFULL);

	if (status == VMXNET3_TX_RINGFULL) {
		/*
		 * Ring full: re-attach the current message to the head of the
		 * unsent chain and hand the whole chain back to MAC for a
		 * later retransmit.
		 */
		atomic_inc_32(&dp->tx_ring_full);
		mp->b_next = mps;
		mps = mp;
	} else {
		ASSERT(!mps);
	}

	/* Notify the device */
	mutex_enter(&dp->txLock);
	if (txqCtrl->txNumDeferred >= txqCtrl->txThreshold) {
		/* Enough work accumulated: ring the doorbell once. */
		txqCtrl->txNumDeferred = 0;
		VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_TXPROD, cmdRing->next2fill);
	}
	mutex_exit(&dp->txLock);

	return (mps);
}
42162dadd65SYuri Pankov 
42262dadd65SYuri Pankov /*
423ca5345b6SSebastien Roy  * Parse a transmit queue and complete packets.
42462dadd65SYuri Pankov  *
425ca5345b6SSebastien Roy  * Returns:
426ca5345b6SSebastien Roy  *	B_TRUE if Tx must be updated or B_FALSE if no action is required.
42762dadd65SYuri Pankov  */
boolean_t
vmxnet3_tx_complete(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq)
{
	vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
	vmxnet3_compring_t *compRing = &txq->compRing;
	Vmxnet3_GenericDesc *compDesc;
	boolean_t completedTx = B_FALSE;
	boolean_t ret = B_FALSE;

	mutex_enter(&dp->txLock);

	/*
	 * Consume completion descriptors while their generation bit matches
	 * ours; a mismatch means the device has not written that slot yet.
	 */
	compDesc = VMXNET3_GET_DESC(compRing, compRing->next2comp);
	while (compDesc->tcd.gen == compRing->gen) {
		vmxnet3_metatx_t *sopMetaDesc, *eopMetaDesc;
		uint16_t sopIdx, eopIdx;
		mblk_t *mp;

		/*
		 * The completion names the EOP command descriptor; its
		 * metadata points back to the SOP, which holds the mblk.
		 */
		eopIdx = compDesc->tcd.txdIdx;
		eopMetaDesc = &txq->metaRing[eopIdx];
		sopIdx = eopMetaDesc->sopIdx;
		sopMetaDesc = &txq->metaRing[sopIdx];

		/* Return the packet's descriptors to the command ring. */
		ASSERT(eopMetaDesc->frags);
		cmdRing->avail += eopMetaDesc->frags;

		ASSERT(sopMetaDesc->mp);
		mp = sopMetaDesc->mp;
		freemsg(mp);

		/* Clear the metadata for reuse by a future packet. */
		eopMetaDesc->sopIdx = 0;
		eopMetaDesc->frags = 0;
		sopMetaDesc->mp = NULL;

		completedTx = B_TRUE;

		/* Note: mp is already freed; only its address is logged. */
		VMXNET3_DEBUG(dp, 3, "cp 0x%p on [%u;%u]\n", (void *)mp, sopIdx,
		    eopIdx);

		VMXNET3_INC_RING_IDX(compRing, compRing->next2comp);
		compDesc = VMXNET3_GET_DESC(compRing, compRing->next2comp);
	}

	/*
	 * If transmission had stalled on a full ring and we just freed
	 * descriptors, tell the caller to resume Tx (mac_tx_update()).
	 */
	if (dp->txMustResched && completedTx) {
		dp->txMustResched = B_FALSE;
		ret = B_TRUE;
	}

	mutex_exit(&dp->txLock);

	return (ret);
}