/*
 * Copyright (C) 2007 VMware, Inc. All rights reserved.
 *
 * The contents of this file are subject to the terms of the Common
 * Development and Distribution License (the "License") version 1.0
 * and no later version.  You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at
 *         http://www.opensource.org/licenses/cddl1.php
 *
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */
/*
 * Copyright (c) 2013, 2016 by Delphix. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 */

#include <vmxnet3.h>

static void vmxnet3_put_rxbuf(vmxnet3_rxbuf_t *);

/*
 * Allocate a new rxBuf from memory. All its fields are set except
 * for its associated mblk, which must be allocated later.
 *
 * Returns:
 *	A new rxBuf or NULL.
 */
static vmxnet3_rxbuf_t *
vmxnet3_alloc_rxbuf(vmxnet3_softc_t *dp, boolean_t canSleep)
{
	vmxnet3_rxbuf_t *rxBuf;
	int flag = canSleep ? KM_SLEEP : KM_NOSLEEP;
	int err;

	rxBuf = kmem_zalloc(sizeof (vmxnet3_rxbuf_t), flag);
	if (rxBuf == NULL) {
		atomic_inc_32(&dp->rx_alloc_failed);
		return (NULL);
	}

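	/*
	 * Editorial note on sizing (an assumption, not from the original
	 * author): the 18 bytes added on top of the MTU below presumably
	 * cover the 14-byte Ethernet header plus a 4-byte VLAN tag, so a
	 * maximally-sized tagged frame fits in a single buffer.
	 */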
	if ((err = vmxnet3_alloc_dma_mem_1(dp, &rxBuf->dma, (dp->cur_mtu + 18),
	    canSleep)) != 0) {
		VMXNET3_DEBUG(dp, 0, "Failed to allocate %d bytes for rx buf, "
		    "err:%d\n", (dp->cur_mtu + 18), err);
		kmem_free(rxBuf, sizeof (vmxnet3_rxbuf_t));
		atomic_inc_32(&dp->rx_alloc_failed);
		return (NULL);
	}

	rxBuf->freeCB.free_func = vmxnet3_put_rxbuf;
	rxBuf->freeCB.free_arg = (caddr_t)rxBuf;
	rxBuf->dp = dp;

	atomic_inc_32(&dp->rx_num_bufs);
	atomic_inc_32(&dp->rx_alloc_buf);
	return (rxBuf);
}

static void
vmxnet3_free_rxbuf(vmxnet3_softc_t *dp, vmxnet3_rxbuf_t *rxBuf)
{
	vmxnet3_free_dma_mem(&rxBuf->dma);
	kmem_free(rxBuf, sizeof (vmxnet3_rxbuf_t));

#ifndef	DEBUG
	atomic_dec_32(&dp->rx_num_bufs);
#else
	{
		uint32_t nv = atomic_dec_32_nv(&dp->rx_num_bufs);
		ASSERT(nv != (uint32_t)-1);
	}
#endif
}

/*
 * Return a rxBuf to the pool. The init argument, when B_TRUE, indicates
 * that we're being called for the purpose of pool initialization, and
 * therefore, we should place the buffer in the pool even if the device
 * isn't enabled.
 *
 * Returns:
 *	B_TRUE if the buffer was returned to the pool, or B_FALSE if it
 *	wasn't (e.g. if the device is stopped).
 */
static boolean_t
vmxnet3_put_rxpool_buf(vmxnet3_softc_t *dp, vmxnet3_rxbuf_t *rxBuf,
    boolean_t init)
{
	vmxnet3_rxpool_t *rxPool = &dp->rxPool;
	boolean_t returned = B_FALSE;

	mutex_enter(&dp->rxPoolLock);
	ASSERT(rxPool->nBufs <= rxPool->nBufsLimit);
	if ((dp->devEnabled || init) && rxPool->nBufs < rxPool->nBufsLimit) {
		ASSERT((rxPool->listHead == NULL && rxPool->nBufs == 0) ||
		    (rxPool->listHead != NULL && rxPool->nBufs != 0));
		rxBuf->next = rxPool->listHead;
		rxPool->listHead = rxBuf;
		rxPool->nBufs++;
		returned = B_TRUE;
	}
	mutex_exit(&dp->rxPoolLock);
	return (returned);
}

/*
 * Return a rxBuf to the pool or free it.
 */
static void
vmxnet3_put_rxbuf(vmxnet3_rxbuf_t *rxBuf)
{
	vmxnet3_softc_t *dp = rxBuf->dp;

	if (!vmxnet3_put_rxpool_buf(dp, rxBuf, B_FALSE))
		vmxnet3_free_rxbuf(dp, rxBuf);
}

/*
 * Get an unused rxBuf from the pool.
 *
 * Returns:
 *	A rxBuf or NULL if there are no buffers in the pool.
 */
static vmxnet3_rxbuf_t *
vmxnet3_get_rxpool_buf(vmxnet3_softc_t *dp)
{
	vmxnet3_rxpool_t *rxPool = &dp->rxPool;
	vmxnet3_rxbuf_t *rxBuf = NULL;

	mutex_enter(&dp->rxPoolLock);
	if (rxPool->listHead != NULL) {
		rxBuf = rxPool->listHead;
		rxPool->listHead = rxBuf->next;
		rxPool->nBufs--;
		ASSERT((rxPool->listHead == NULL && rxPool->nBufs == 0) ||
		    (rxPool->listHead != NULL && rxPool->nBufs != 0));
	}
	mutex_exit(&dp->rxPoolLock);
	return (rxBuf);
}

/*
 * Fill a rxPool by allocating the maximum number of buffers.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_rxpool_init(vmxnet3_softc_t *dp)
{
	int err = 0;
	vmxnet3_rxbuf_t *rxBuf;

	ASSERT(dp->rxPool.nBufsLimit > 0);
	while (dp->rxPool.nBufs < dp->rxPool.nBufsLimit) {
		if ((rxBuf = vmxnet3_alloc_rxbuf(dp, B_FALSE)) == NULL) {
			err = ENOMEM;
			break;
		}
		VERIFY(vmxnet3_put_rxpool_buf(dp, rxBuf, B_TRUE));
	}

	if (err != 0) {
		while ((rxBuf = vmxnet3_get_rxpool_buf(dp)) != NULL) {
			vmxnet3_free_rxbuf(dp, rxBuf);
		}
	}

	return (err);
}

/*
 * Populate a Rx descriptor with a new rxBuf. If the pool argument is B_TRUE,
 * then try to take a buffer from rxPool. If the pool is empty and
 * dp->alloc_ok is true, then fall back to dynamic allocation. If pool is
 * B_FALSE, then always allocate a new buffer (this is only used when
 * populating the initial set of buffers in the receive queue during start).
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
static int
vmxnet3_rx_populate(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq, uint16_t idx,
    boolean_t canSleep, boolean_t pool)
{
	vmxnet3_rxbuf_t *rxBuf = NULL;

	if (pool && (rxBuf = vmxnet3_get_rxpool_buf(dp)) == NULL) {
		/* The maximum number of pool buffers has been allocated. */
		atomic_inc_32(&dp->rx_pool_empty);
		if (!dp->alloc_ok) {
			atomic_inc_32(&dp->rx_alloc_failed);
		}
	}

	if (rxBuf == NULL && (!pool || dp->alloc_ok)) {
		rxBuf = vmxnet3_alloc_rxbuf(dp, canSleep);
	}

	if (rxBuf != NULL) {
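		/*
		 * desballoc() wraps the DMA buffer in an mblk whose free
		 * routine is rxBuf->freeCB (vmxnet3_put_rxbuf), so a later
		 * freemsg() on this mblk hands the buffer back to the pool
		 * rather than releasing the DMA memory.
		 */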
		rxBuf->mblk = desballoc((uchar_t *)rxBuf->dma.buf,
		    rxBuf->dma.bufLen, BPRI_MED, &rxBuf->freeCB);
		if (rxBuf->mblk == NULL) {
			if (pool) {
				VERIFY(vmxnet3_put_rxpool_buf(dp, rxBuf,
				    B_FALSE));
			} else {
				vmxnet3_free_rxbuf(dp, rxBuf);
			}
			atomic_inc_32(&dp->rx_alloc_failed);
			return (ENOMEM);
		}

		vmxnet3_cmdring_t *cmdRing = &rxq->cmdRing;
		Vmxnet3_GenericDesc *rxDesc = VMXNET3_GET_DESC(cmdRing, idx);

		rxq->bufRing[idx].rxBuf = rxBuf;
		rxDesc->rxd.addr = rxBuf->dma.bufPA;
		rxDesc->rxd.len = rxBuf->dma.bufLen;
		/* rxDesc->rxd.btype = 0; */
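		/*
		 * Flipping the gen bit is what hands the descriptor to the
		 * device, so it must be the last store; membar_producer()
		 * keeps the addr/len stores above from being reordered
		 * past it.
		 */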
		membar_producer();
		rxDesc->rxd.gen = cmdRing->gen;
	} else {
		return (ENOMEM);
	}

	return (0);
}

/*
 * Initialize a RxQueue by populating the whole Rx ring with rxBufs.
 *
 * Returns:
 *	0 on success, non-zero on failure.
 */
int
vmxnet3_rxqueue_init(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq)
{
	vmxnet3_cmdring_t *cmdRing = &rxq->cmdRing;
	int err;

	dp->rxPool.nBufsLimit = vmxnet3_getprop(dp, "RxBufPoolLimit", 0,
	    cmdRing->size * 10, cmdRing->size * 2);

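	/*
	 * A note on the loop below (editorial): next2fill is expected to
	 * start at 0 here, so the do/while visits every descriptor in the
	 * command ring exactly once and stops when the index wraps back
	 * to 0. Assuming vmxnet3_getprop() takes (min, max, default), the
	 * pool limit above defaults to twice the ring size, capped at
	 * ten times.
	 */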
	do {
		if ((err = vmxnet3_rx_populate(dp, rxq, cmdRing->next2fill,
		    B_TRUE, B_FALSE)) != 0) {
			goto error;
		}
		VMXNET3_INC_RING_IDX(cmdRing, cmdRing->next2fill);
	} while (cmdRing->next2fill);

	/*
	 * Pre-allocate rxPool buffers so that we never have to allocate
	 * new buffers from interrupt context when we need to replace a buffer
	 * in the rxqueue.
	 */
	if ((err = vmxnet3_rxpool_init(dp)) != 0) {
		goto error;
	}

	return (0);

error:
	while (cmdRing->next2fill) {
		VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill);
		vmxnet3_free_rxbuf(dp, rxq->bufRing[cmdRing->next2fill].rxBuf);
	}

	return (err);
}

/*
 * Finish a RxQueue by freeing all the related rxBufs.
 */
void
vmxnet3_rxqueue_fini(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq)
{
	vmxnet3_rxbuf_t *rxBuf;
	unsigned int i;

	ASSERT(!dp->devEnabled);

	/* First the rxPool */
	while ((rxBuf = vmxnet3_get_rxpool_buf(dp)) != NULL)
		vmxnet3_free_rxbuf(dp, rxBuf);

	/* Then the ring */
	for (i = 0; i < rxq->cmdRing.size; i++) {
		rxBuf = rxq->bufRing[i].rxBuf;
		ASSERT(rxBuf != NULL);
		ASSERT(rxBuf->mblk != NULL);
		/*
		 * Here, freemsg() will trigger a call to vmxnet3_put_rxbuf()
		 * which will then call vmxnet3_free_rxbuf() because the
		 * underlying device is disabled.
		 */
		freemsg(rxBuf->mblk);
	}
}

/*
 * Determine if a received packet was checksummed by the Vmxnet3
 * device and tag the mp appropriately.
 */
static void
vmxnet3_rx_hwcksum(vmxnet3_softc_t *dp, mblk_t *mp,
    Vmxnet3_GenericDesc *compDesc)
{
	uint32_t flags = 0;

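	/*
	 * Reading of the completion-descriptor bits (hedged, from the
	 * field names): cnc means no checksum was computed at all; v4 and
	 * ipc flag an IPv4 packet with a good IP header checksum; tcp/udp
	 * give the L4 protocol and tuc a good L4 checksum.
	 */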
	if (!compDesc->rcd.cnc) {
		if (compDesc->rcd.v4 && compDesc->rcd.ipc) {
			flags |= HCK_IPV4_HDRCKSUM;
			if ((compDesc->rcd.tcp || compDesc->rcd.udp) &&
			    compDesc->rcd.tuc) {
				flags |= HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
			}
		}

		VMXNET3_DEBUG(dp, 3, "rx cksum flags = 0x%x\n", flags);

		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
	}
}

/*
 * Interrupt handler for Rx. Check for any pending Rx completions and
 * chain the received packets into mplist.
 *
 * Returns:
 *	A list of messages to pass to the MAC subsystem.
 */
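/*
 * Editorial summary of the loop below: walk the completion ring while
 * its gen bit matches ours, gather each packet's sop..eop fragments
 * into a single mblk chain, refill every command descriptor as its
 * buffer is consumed, and finally update the RXPROD register so the
 * device sees the fresh buffers.
 */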
mblk_t *
vmxnet3_rx_intr(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq)
{
	vmxnet3_compring_t *compRing = &rxq->compRing;
	vmxnet3_cmdring_t *cmdRing = &rxq->cmdRing;
	Vmxnet3_RxQueueCtrl *rxqCtrl = rxq->sharedCtrl;
	Vmxnet3_GenericDesc *compDesc;
	mblk_t *mplist = NULL, **mplistTail = &mplist;

	ASSERT(mutex_owned(&dp->intrLock));

	compDesc = VMXNET3_GET_DESC(compRing, compRing->next2comp);
	while (compDesc->rcd.gen == compRing->gen) {
		mblk_t *mp = NULL, **mpTail = &mp;
		boolean_t mpValid = B_TRUE;
		boolean_t eop;

		ASSERT(compDesc->rcd.sop);

		do {
			uint16_t rxdIdx = compDesc->rcd.rxdIdx;
			vmxnet3_rxbuf_t *rxBuf = rxq->bufRing[rxdIdx].rxBuf;
			mblk_t *mblk = rxBuf->mblk;
			Vmxnet3_GenericDesc *rxDesc;

			while (compDesc->rcd.gen != compRing->gen) {
				/*
				 * H/W may still be in the middle of
				 * generating this entry, so hold on until
				 * the gen bit is flipped.
				 */
				membar_consumer();
			}
			ASSERT(compDesc->rcd.gen == compRing->gen);
			ASSERT(rxBuf != NULL);
			ASSERT(mblk != NULL);

			/* Some Rx descriptors may have been skipped */
			while (cmdRing->next2fill != rxdIdx) {
				rxDesc = VMXNET3_GET_DESC(cmdRing,
				    cmdRing->next2fill);
				rxDesc->rxd.gen = cmdRing->gen;
				VMXNET3_INC_RING_IDX(cmdRing,
				    cmdRing->next2fill);
			}

			eop = compDesc->rcd.eop;

			/*
			 * Now we have a piece of the packet in the rxdIdx
			 * descriptor. Grab it only if we manage to replace
			 * it with a fresh buffer.
			 */
			if (vmxnet3_rx_populate(dp, rxq, rxdIdx, B_FALSE,
			    B_TRUE) == 0) {
				/* Success, we can chain the mblk with the mp */
				mblk->b_wptr = mblk->b_rptr + compDesc->rcd.len;
				*mpTail = mblk;
				mpTail = &mblk->b_cont;
				ASSERT(*mpTail == NULL);

				VMXNET3_DEBUG(dp, 3, "rx 0x%p on [%u]\n",
				    (void *)mblk, rxdIdx);

				if (eop) {
					if (!compDesc->rcd.err) {
						/*
						 * Tag the mp if it was
						 * checksummed by the H/W
						 */
						vmxnet3_rx_hwcksum(dp, mp,
						    compDesc);
					} else {
						mpValid = B_FALSE;
					}
				}
			} else {
				/*
				 * Keep the same buffer, we still need
				 * to flip the gen bit
				 */
				rxDesc = VMXNET3_GET_DESC(cmdRing, rxdIdx);
				rxDesc->rxd.gen = cmdRing->gen;
				mpValid = B_FALSE;
			}

			VMXNET3_INC_RING_IDX(compRing, compRing->next2comp);
			VMXNET3_INC_RING_IDX(cmdRing, cmdRing->next2fill);
			compDesc = VMXNET3_GET_DESC(compRing,
			    compRing->next2comp);
		} while (!eop);

		if (mp != NULL) {
			if (mpValid) {
				*mplistTail = mp;
				mplistTail = &mp->b_next;
				ASSERT(*mplistTail == NULL);
			} else {
				/* This message has holes, drop it */
				freemsg(mp);
			}
		}
	}

	if (rxqCtrl->updateRxProd) {
		uint32_t rxprod;

		/*
		 * All buffers are actually available, but we can't tell that to
		 * the device because it may interpret that as an empty ring.
		 * So skip one buffer.
		 */
		if (cmdRing->next2fill) {
			rxprod = cmdRing->next2fill - 1;
		} else {
			rxprod = cmdRing->size - 1;
		}
		VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_RXPROD, rxprod);
	}

	return (mplist);
}