13dec9fcdSqs /*
23dec9fcdSqs  * CDDL HEADER START
33dec9fcdSqs  *
43dec9fcdSqs  * The contents of this file are subject to the terms of the
53dec9fcdSqs  * Common Development and Distribution License (the "License").
63dec9fcdSqs  * You may not use this file except in compliance with the License.
73dec9fcdSqs  *
83dec9fcdSqs  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93dec9fcdSqs  * or http://www.opensolaris.org/os/licensing.
103dec9fcdSqs  * See the License for the specific language governing permissions
113dec9fcdSqs  * and limitations under the License.
123dec9fcdSqs  *
133dec9fcdSqs  * When distributing Covered Code, include this CDDL HEADER in each
143dec9fcdSqs  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153dec9fcdSqs  * If applicable, add the following below this CDDL HEADER, with the
163dec9fcdSqs  * fields enclosed by brackets "[]" replaced with your own identifying
173dec9fcdSqs  * information: Portions Copyright [yyyy] [name of copyright owner]
183dec9fcdSqs  *
193dec9fcdSqs  * CDDL HEADER END
203dec9fcdSqs  */
213dec9fcdSqs /*
22cf6ef894SMichael Speer  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
233dec9fcdSqs  * Use is subject to license terms.
243dec9fcdSqs  */
253dec9fcdSqs 
263dec9fcdSqs #include <hxge_impl.h>
273dec9fcdSqs #include <hxge_txdma.h>
283dec9fcdSqs #include <sys/llc1.h>
293dec9fcdSqs 
/*
 * Transmit-path tunables.  These are non-static globals so they can be
 * patched at boot time (e.g. via /etc/system).  Exact semantics are
 * defined by their reference sites elsewhere in this file — confirm
 * there before changing defaults.
 */
uint32_t hxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t hxge_tx_minfree = 64;		/* presumably min free Tx descs — verify at use */
uint32_t hxge_tx_intr_thres = 0;
uint32_t hxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t hxge_tx_tiny_pack = 1;
uint32_t hxge_tx_use_bcopy = 1;		/* non-zero: allow bcopy Tx path */
363dec9fcdSqs 
373dec9fcdSqs extern uint32_t hxge_tx_ring_size;
383dec9fcdSqs extern uint32_t hxge_bcopy_thresh;
393dec9fcdSqs extern uint32_t hxge_dvma_thresh;
403dec9fcdSqs extern uint32_t hxge_dma_stream_thresh;
413dec9fcdSqs extern dma_method_t hxge_force_dma;
423dec9fcdSqs 
433dec9fcdSqs /* Device register access attributes for PIO.  */
443dec9fcdSqs extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr;
453dec9fcdSqs 
463dec9fcdSqs /* Device descriptor access attributes for DMA.  */
473dec9fcdSqs extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr;
483dec9fcdSqs 
493dec9fcdSqs /* Device buffer access attributes for DMA.  */
503dec9fcdSqs extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr;
513dec9fcdSqs extern ddi_dma_attr_t hxge_desc_dma_attr;
523dec9fcdSqs extern ddi_dma_attr_t hxge_tx_dma_attr;
533dec9fcdSqs 
/*
 * Prototypes for file-local (static) support routines: channel
 * mapping/unmapping, hardware start/stop, per-channel buffer and
 * mailbox setup, error-event handling and fatal-error recovery.
 */
static hxge_status_t hxge_map_txdma(p_hxge_t hxgep);
static void hxge_unmap_txdma(p_hxge_t hxgep);
static hxge_status_t hxge_txdma_hw_start(p_hxge_t hxgep);
static void hxge_txdma_hw_stop(p_hxge_t hxgep);

static hxge_status_t hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
    uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
    p_tx_mbox_t *tx_mbox_p);
static void hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t,
    p_hxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p);
static void hxge_map_txdma_channel_cfg_ring(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, p_tx_ring_t, p_tx_mbox_t *);
static void hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static p_tx_ring_t hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel);
static hxge_status_t hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index,
    p_hxge_ldv_t ldvp, tdc_stat_t cs);
static p_tx_mbox_t hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel);
static hxge_status_t hxge_txdma_fatal_err_recover(p_hxge_t hxgep,
    uint16_t channel, p_tx_ring_t tx_ring_p);
static hxge_status_t hxge_tx_port_fatal_err_recover(p_hxge_t hxgep);
843dec9fcdSqs 
853dec9fcdSqs hxge_status_t
hxge_init_txdma_channels(p_hxge_t hxgep)863dec9fcdSqs hxge_init_txdma_channels(p_hxge_t hxgep)
873dec9fcdSqs {
883dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
893dec9fcdSqs 	block_reset_t	reset_reg;
903dec9fcdSqs 
913dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_init_txdma_channels"));
923dec9fcdSqs 
933dec9fcdSqs 	/*
943dec9fcdSqs 	 * Reset TDC block from PEU to cleanup any unknown configuration.
953dec9fcdSqs 	 * This may be resulted from previous reboot.
963dec9fcdSqs 	 */
973dec9fcdSqs 	reset_reg.value = 0;
983dec9fcdSqs 	reset_reg.bits.tdc_rst = 1;
993dec9fcdSqs 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
1003dec9fcdSqs 
1013dec9fcdSqs 	HXGE_DELAY(1000);
1023dec9fcdSqs 
1033dec9fcdSqs 	status = hxge_map_txdma(hxgep);
1043dec9fcdSqs 	if (status != HXGE_OK) {
1053dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1063dec9fcdSqs 		    "<== hxge_init_txdma_channels: status 0x%x", status));
1073dec9fcdSqs 		return (status);
1083dec9fcdSqs 	}
1093dec9fcdSqs 
1103dec9fcdSqs 	status = hxge_txdma_hw_start(hxgep);
1113dec9fcdSqs 	if (status != HXGE_OK) {
1123dec9fcdSqs 		hxge_unmap_txdma(hxgep);
1133dec9fcdSqs 		return (status);
1143dec9fcdSqs 	}
1153dec9fcdSqs 
1163dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1173dec9fcdSqs 	    "<== hxge_init_txdma_channels: status 0x%x", status));
1183dec9fcdSqs 
1193dec9fcdSqs 	return (HXGE_OK);
1203dec9fcdSqs }
1213dec9fcdSqs 
1223dec9fcdSqs void
hxge_uninit_txdma_channels(p_hxge_t hxgep)1233dec9fcdSqs hxge_uninit_txdma_channels(p_hxge_t hxgep)
1243dec9fcdSqs {
1253dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_uninit_txdma_channels"));
1263dec9fcdSqs 
1273dec9fcdSqs 	hxge_txdma_hw_stop(hxgep);
1283dec9fcdSqs 	hxge_unmap_txdma(hxgep);
1293dec9fcdSqs 
1303dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_uinit_txdma_channels"));
1313dec9fcdSqs }
1323dec9fcdSqs 
/*
 * Carve a slice of 'entries' blocks of 'size' bytes each out of the DMA
 * area described by src_p, and describe that slice in dest_p.  The source
 * descriptor is advanced past the slice (kernel address, DMA cookie
 * address and remaining length are all updated), so successive calls hand
 * out consecutive, non-overlapping sub-regions of the same DMA area.
 * No bounds checking is done here; the caller must ensure src_p has at
 * least size * entries bytes remaining.
 */
void
hxge_setup_dma_common(p_hxge_dma_common_t dest_p, p_hxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
	size_t tsize;

	/* Start from a full copy so handles/attributes carry over. */
	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	/*
	 * NOTE(review): advancing dest_p->offset past its own slice looks
	 * odd (one might expect the slice to keep src's current offset) —
	 * confirm against the consumers of 'offset' before changing.
	 */
	dest_p->offset += tsize;

	/* Consume the slice: advance the source region past it. */
	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}
1503dec9fcdSqs 
1513dec9fcdSqs hxge_status_t
hxge_reset_txdma_channel(p_hxge_t hxgep,uint16_t channel,uint64_t reg_data)1523dec9fcdSqs hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel, uint64_t reg_data)
1533dec9fcdSqs {
1543dec9fcdSqs 	hpi_status_t	rs = HPI_SUCCESS;
1553dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
1563dec9fcdSqs 	hpi_handle_t	handle;
1573dec9fcdSqs 
1583dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, " ==> hxge_reset_txdma_channel"));
1593dec9fcdSqs 
1603dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1613dec9fcdSqs 	if ((reg_data & TDC_TDR_RST_MASK) == TDC_TDR_RST_MASK) {
1623dec9fcdSqs 		rs = hpi_txdma_channel_reset(handle, channel);
1633dec9fcdSqs 	} else {
1643dec9fcdSqs 		rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel);
1653dec9fcdSqs 	}
1663dec9fcdSqs 
1673dec9fcdSqs 	if (rs != HPI_SUCCESS) {
1683dec9fcdSqs 		status = HXGE_ERROR | rs;
1693dec9fcdSqs 	}
1703dec9fcdSqs 
1713dec9fcdSqs 	/*
1723dec9fcdSqs 	 * Reset the tail (kick) register to 0. (Hardware will not reset it. Tx
1733dec9fcdSqs 	 * overflow fatal error if tail is not set to 0 after reset!
1743dec9fcdSqs 	 */
1753dec9fcdSqs 	TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);
1763dec9fcdSqs 
1773dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, " <== hxge_reset_txdma_channel"));
1783dec9fcdSqs 
1793dec9fcdSqs 	return (status);
1803dec9fcdSqs }
1813dec9fcdSqs 
1823dec9fcdSqs hxge_status_t
hxge_init_txdma_channel_event_mask(p_hxge_t hxgep,uint16_t channel,tdc_int_mask_t * mask_p)1833dec9fcdSqs hxge_init_txdma_channel_event_mask(p_hxge_t hxgep, uint16_t channel,
1843dec9fcdSqs     tdc_int_mask_t *mask_p)
1853dec9fcdSqs {
1863dec9fcdSqs 	hpi_handle_t	handle;
1873dec9fcdSqs 	hpi_status_t	rs = HPI_SUCCESS;
1883dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
1893dec9fcdSqs 
1903dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1913dec9fcdSqs 	    "<== hxge_init_txdma_channel_event_mask"));
1923dec9fcdSqs 
1933dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1943dec9fcdSqs 
1953dec9fcdSqs 	/*
1963dec9fcdSqs 	 * Mask off tx_rng_oflow since it is a false alarm. The driver
1973dec9fcdSqs 	 * ensures not over flowing the hardware and check the hardware
1983dec9fcdSqs 	 * status.
1993dec9fcdSqs 	 */
2003dec9fcdSqs 	mask_p->bits.tx_rng_oflow = 1;
2013dec9fcdSqs 	rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p);
2023dec9fcdSqs 	if (rs != HPI_SUCCESS) {
2033dec9fcdSqs 		status = HXGE_ERROR | rs;
2043dec9fcdSqs 	}
2053dec9fcdSqs 
2063dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2073dec9fcdSqs 	    "==> hxge_init_txdma_channel_event_mask"));
2083dec9fcdSqs 	return (status);
2093dec9fcdSqs }
2103dec9fcdSqs 
2113dec9fcdSqs hxge_status_t
hxge_enable_txdma_channel(p_hxge_t hxgep,uint16_t channel,p_tx_ring_t tx_desc_p,p_tx_mbox_t mbox_p)2123dec9fcdSqs hxge_enable_txdma_channel(p_hxge_t hxgep,
2133dec9fcdSqs     uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
2143dec9fcdSqs {
2153dec9fcdSqs 	hpi_handle_t	handle;
2163dec9fcdSqs 	hpi_status_t	rs = HPI_SUCCESS;
2173dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
2183dec9fcdSqs 
2193dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_enable_txdma_channel"));
2203dec9fcdSqs 
2213dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
2223dec9fcdSqs 	/*
2233dec9fcdSqs 	 * Use configuration data composed at init time. Write to hardware the
2243dec9fcdSqs 	 * transmit ring configurations.
2253dec9fcdSqs 	 */
2263dec9fcdSqs 	rs = hpi_txdma_ring_config(handle, OP_SET, channel,
2273dec9fcdSqs 	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
2283dec9fcdSqs 
2293dec9fcdSqs 	if (rs != HPI_SUCCESS) {
2303dec9fcdSqs 		return (HXGE_ERROR | rs);
2313dec9fcdSqs 	}
2323dec9fcdSqs 
2333dec9fcdSqs 	/* Write to hardware the mailbox */
2343dec9fcdSqs 	rs = hpi_txdma_mbox_config(handle, OP_SET, channel,
2353dec9fcdSqs 	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
2363dec9fcdSqs 
2373dec9fcdSqs 	if (rs != HPI_SUCCESS) {
2383dec9fcdSqs 		return (HXGE_ERROR | rs);
2393dec9fcdSqs 	}
2403dec9fcdSqs 
2413dec9fcdSqs 	/* Start the DMA engine. */
2423dec9fcdSqs 	rs = hpi_txdma_channel_init_enable(handle, channel);
2433dec9fcdSqs 	if (rs != HPI_SUCCESS) {
2443dec9fcdSqs 		return (HXGE_ERROR | rs);
2453dec9fcdSqs 	}
2463dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_enable_txdma_channel"));
2473dec9fcdSqs 	return (status);
2483dec9fcdSqs }
2493dec9fcdSqs 
/*
 * Fill in the hardware transmit packet header for the packet in mp.
 *
 *	mp	 - first data mblk of the packet (transmit header excluded)
 *	fill_len - when B_TRUE, only record the total transfer length and
 *		   skip all header parsing
 *	l4_cksum - request L4 (TCP/UDP) checksum offload
 *	pkt_len	 - total packet length, used only when fill_len is set
 *	npads	 - pad byte count recorded in the header
 *	pkthdrp	 - transmit packet header to fill; caller must pre-zero it
 *
 * Parses the Ethernet (optionally 802.2/LLC or VLAN-tagged) and IPv4/IPv6
 * headers to set the IHL, L3-start, IP-version and packet-type (checksum)
 * fields hardware needs for offload.
 */
void
hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
    int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp)
{
	p_tx_pkt_header_t	hdrp;
	p_mblk_t		nmp;
	uint64_t		tmp;
	size_t			mblk_len;
	size_t			iph_len;
	size_t			hdrs_size;
	uint8_t			*ip_buf;
	uint16_t		eth_type;
	uint8_t			ipproto;
	boolean_t		is_vlan = B_FALSE;
	size_t			eth_hdr_size;
	/* Scratch area: L2 header plus enough of the L3 header to parse. */
	uint8_t hdrs_buf[sizeof (struct ether_header) + 64 + sizeof (uint32_t)];

	HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		/* Length-only mode: set total transfer length and finish. */
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_fill_tx_hdr: pkt_len %d npads %d",
		    pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);

		goto fill_tx_header_done;
	}
	/* Record the pad byte count. */
	tmp = (uint64_t)npads;
	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the Neptune
	 * transmit header).
	 */
	nmp = mp;
	mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, mblk_len));
	ip_buf = NULL;
	/*
	 * NOTE(review): this copy assumes the first mblk holds at least a
	 * full ether_vlan_header — confirm that callers guarantee it.
	 */
	bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> : hxge_fill_tx_hdr: (value 0x%llx) ether type 0x%x",
	    eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		/* 802.3 frame: the type field is actually a length. */
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: LLC value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header)) ==
		    LLC_SNAP_SAP) {
			/* SNAP header: real EtherType follows the LLC/SNAP. */
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			/* Non-SNAP LLC: nothing more to parse. */
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		/* VLAN-tagged frame: mark it and fetch the inner type. */
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx",
		    hdrp->value));
	}
	/* L3 header offset depends on whether a VLAN tag is present. */
	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		/*
		 * Try to parse the IPv4 header in place in the first mblk;
		 * fall back to pulling headers into hdrs_buf below if it
		 * does not contain enough bytes.
		 */
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			/*
			 * IHL field: header length in 32-bit words.
			 * NOTE(review): the comparison below mixes that
			 * word count with byte lengths — confirm intent.
			 */
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}
		}
		if (ip_buf == NULL) {
			/* Gather header bytes across the mblk chain. */
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}
		/* Byte 9 of the IPv4 header is the protocol field. */
		ipproto = ip_buf[9];

		/* Program IHL and L3 start (in 16-bit units) for hardware. */
		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
		    "tmp 0x%x", iph_len, hdrp->bits.l3start, eth_hdr_size,
		    ipproto, tmp));
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: IP value 0x%llx", hdrp->value));
		break;

	case ETHERTYPE_IPV6:
		/* IPv6: always gather the headers into hdrs_buf. */
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >= (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) - hdrs_size;
			bcopy(nmp->b_rptr, &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		/* Flag IPv6 for the hardware. */
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.l3start, eth_hdr_size, ipproto));
		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));
		break;

	default:
		/* Neither IPv4 nor IPv6: no L4 offload possible. */
		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	/* Set the packet-type field when L4 checksum offload is wanted. */
	switch (ipproto) {
	case IPPROTO_TCP:
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			tmp = 1ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_tx_pkt_hdr_init: TCP CKSUM"
			    "value 0x%llx", hdrp->value));
		}
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: TCP value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			tmp = 0x2ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
		}
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: UDP value 0x%llx",
		    hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx",
	    pkt_len, npads, hdrp->value));
	HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_fill_tx_hdr"));
}
4523dec9fcdSqs 
4533dec9fcdSqs /*ARGSUSED*/
4543dec9fcdSqs p_mblk_t
hxge_tx_pkt_header_reserve(p_mblk_t mp,uint8_t * npads)4553dec9fcdSqs hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
4563dec9fcdSqs {
4573dec9fcdSqs 	p_mblk_t newmp = NULL;
4583dec9fcdSqs 
4593dec9fcdSqs 	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
4603dec9fcdSqs 		HXGE_DEBUG_MSG((NULL, TX_CTL,
4613dec9fcdSqs 		    "<== hxge_tx_pkt_header_reserve: allocb failed"));
4623dec9fcdSqs 		return (NULL);
4633dec9fcdSqs 	}
4643dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, TX_CTL,
4653dec9fcdSqs 	    "==> hxge_tx_pkt_header_reserve: get new mp"));
4663dec9fcdSqs 	DB_TYPE(newmp) = M_DATA;
4673dec9fcdSqs 	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
4683dec9fcdSqs 	linkb(newmp, mp);
4693dec9fcdSqs 	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
4703dec9fcdSqs 
4713dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, TX_CTL,
4723dec9fcdSqs 	    "==>hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p",
4733dec9fcdSqs 	    newmp->b_rptr, newmp->b_wptr));
4743dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, TX_CTL,
4753dec9fcdSqs 	    "<== hxge_tx_pkt_header_reserve: use new mp"));
4763dec9fcdSqs 	return (newmp);
4773dec9fcdSqs }
4783dec9fcdSqs 
4793dec9fcdSqs int
hxge_tx_pkt_nmblocks(p_mblk_t mp,int * tot_xfer_len_p)4803dec9fcdSqs hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
4813dec9fcdSqs {
4823dec9fcdSqs 	uint_t		nmblks;
4833dec9fcdSqs 	ssize_t		len;
4843dec9fcdSqs 	uint_t		pkt_len;
4853dec9fcdSqs 	p_mblk_t	nmp, bmp, tmp;
4863dec9fcdSqs 	uint8_t		*b_wptr;
4873dec9fcdSqs 
4883dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, TX_CTL,
4893dec9fcdSqs 	    "==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d",
4903dec9fcdSqs 	    mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
4913dec9fcdSqs 
4923dec9fcdSqs 	nmp = mp;
4933dec9fcdSqs 	bmp = mp;
4943dec9fcdSqs 	nmblks = 0;
4953dec9fcdSqs 	pkt_len = 0;
4963dec9fcdSqs 	*tot_xfer_len_p = 0;
4973dec9fcdSqs 
4983dec9fcdSqs 	while (nmp) {
4993dec9fcdSqs 		len = MBLKL(nmp);
5003dec9fcdSqs 		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
5013dec9fcdSqs 		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
5023dec9fcdSqs 		    len, pkt_len, nmblks, *tot_xfer_len_p));
5033dec9fcdSqs 
5043dec9fcdSqs 		if (len <= 0) {
5053dec9fcdSqs 			bmp = nmp;
5063dec9fcdSqs 			nmp = nmp->b_cont;
5073dec9fcdSqs 			HXGE_DEBUG_MSG((NULL, TX_CTL,
5083dec9fcdSqs 			    "==> hxge_tx_pkt_nmblocks:"
5093dec9fcdSqs 			    " len (0) pkt_len %d nmblks %d", pkt_len, nmblks));
5103dec9fcdSqs 			continue;
5113dec9fcdSqs 		}
5123dec9fcdSqs 		*tot_xfer_len_p += len;
5133dec9fcdSqs 		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
5143dec9fcdSqs 		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
5153dec9fcdSqs 		    len, pkt_len, nmblks, *tot_xfer_len_p));
5163dec9fcdSqs 
5173dec9fcdSqs 		if (len < hxge_bcopy_thresh) {
5183dec9fcdSqs 			HXGE_DEBUG_MSG((NULL, TX_CTL,
5193dec9fcdSqs 			    "==> hxge_tx_pkt_nmblocks: "
5203dec9fcdSqs 			    "len %d (< thresh) pkt_len %d nmblks %d",
5213dec9fcdSqs 			    len, pkt_len, nmblks));
5223dec9fcdSqs 			if (pkt_len == 0)
5233dec9fcdSqs 				nmblks++;
5243dec9fcdSqs 			pkt_len += len;
5253dec9fcdSqs 			if (pkt_len >= hxge_bcopy_thresh) {
5263dec9fcdSqs 				pkt_len = 0;
5273dec9fcdSqs 				len = 0;
5283dec9fcdSqs 				nmp = bmp;
5293dec9fcdSqs 			}
5303dec9fcdSqs 		} else {
5313dec9fcdSqs 			HXGE_DEBUG_MSG((NULL, TX_CTL,
5323dec9fcdSqs 			    "==> hxge_tx_pkt_nmblocks: "
5333dec9fcdSqs 			    "len %d (> thresh) pkt_len %d nmblks %d",
5343dec9fcdSqs 			    len, pkt_len, nmblks));
5353dec9fcdSqs 			pkt_len = 0;
5363dec9fcdSqs 			nmblks++;
5373dec9fcdSqs 			/*
5383dec9fcdSqs 			 * Hardware limits the transfer length to 4K. If len is
5393dec9fcdSqs 			 * more than 4K, we need to break it up to at most 2
5403dec9fcdSqs 			 * more blocks.
5413dec9fcdSqs 			 */
5423dec9fcdSqs 			if (len > TX_MAX_TRANSFER_LENGTH) {
5433dec9fcdSqs 				uint32_t nsegs;
5443dec9fcdSqs 
5453dec9fcdSqs 				HXGE_DEBUG_MSG((NULL, TX_CTL,
5463dec9fcdSqs 				    "==> hxge_tx_pkt_nmblocks: "
5473dec9fcdSqs 				    "len %d pkt_len %d nmblks %d nsegs %d",
5483dec9fcdSqs 				    len, pkt_len, nmblks, nsegs));
5493dec9fcdSqs 				nsegs = 1;
5503dec9fcdSqs 				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
5513dec9fcdSqs 					++nsegs;
5523dec9fcdSqs 				}
5533dec9fcdSqs 				do {
5543dec9fcdSqs 					b_wptr = nmp->b_rptr +
5553dec9fcdSqs 					    TX_MAX_TRANSFER_LENGTH;
5563dec9fcdSqs 					nmp->b_wptr = b_wptr;
5573dec9fcdSqs 					if ((tmp = dupb(nmp)) == NULL) {
5583dec9fcdSqs 						return (0);
5593dec9fcdSqs 					}
5603dec9fcdSqs 					tmp->b_rptr = b_wptr;
5613dec9fcdSqs 					tmp->b_wptr = nmp->b_wptr;
5623dec9fcdSqs 					tmp->b_cont = nmp->b_cont;
5633dec9fcdSqs 					nmp->b_cont = tmp;
5643dec9fcdSqs 					nmblks++;
5653dec9fcdSqs 					if (--nsegs) {
5663dec9fcdSqs 						nmp = tmp;
5673dec9fcdSqs 					}
5683dec9fcdSqs 				} while (nsegs);
5693dec9fcdSqs 				nmp = tmp;
5703dec9fcdSqs 			}
5713dec9fcdSqs 		}
5723dec9fcdSqs 
5733dec9fcdSqs 		/*
5743dec9fcdSqs 		 * Hardware limits the transmit gather pointers to 15.
5753dec9fcdSqs 		 */
5763dec9fcdSqs 		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
5773dec9fcdSqs 		    TX_MAX_GATHER_POINTERS) {
5783dec9fcdSqs 			HXGE_DEBUG_MSG((NULL, TX_CTL,
5793dec9fcdSqs 			    "==> hxge_tx_pkt_nmblocks: pull msg - "
5803dec9fcdSqs 			    "len %d pkt_len %d nmblks %d",
5813dec9fcdSqs 			    len, pkt_len, nmblks));
5823dec9fcdSqs 			/* Pull all message blocks from b_cont */
5833dec9fcdSqs 			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
5843dec9fcdSqs 				return (0);
5853dec9fcdSqs 			}
5863dec9fcdSqs 			freemsg(nmp->b_cont);
5873dec9fcdSqs 			nmp->b_cont = tmp;
5883dec9fcdSqs 			pkt_len = 0;
5893dec9fcdSqs 		}
5903dec9fcdSqs 		bmp = nmp;
5913dec9fcdSqs 		nmp = nmp->b_cont;
5923dec9fcdSqs 	}
5933dec9fcdSqs 
5943dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, TX_CTL,
5953dec9fcdSqs 	    "<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
5963dec9fcdSqs 	    "nmblks %d len %d tot_xfer_len %d",
5973dec9fcdSqs 	    mp->b_rptr, mp->b_wptr, nmblks, MBLKL(mp), *tot_xfer_len_p));
5983dec9fcdSqs 	return (nmblks);
5993dec9fcdSqs }
6003dec9fcdSqs 
6013dec9fcdSqs boolean_t
hxge_txdma_reclaim(p_hxge_t hxgep,p_tx_ring_t tx_ring_p,int nmblks)6023dec9fcdSqs hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks)
6033dec9fcdSqs {
6043dec9fcdSqs 	boolean_t		status = B_TRUE;
6053dec9fcdSqs 	p_hxge_dma_common_t	tx_desc_dma_p;
6063dec9fcdSqs 	hxge_dma_common_t	desc_area;
6073dec9fcdSqs 	p_tx_desc_t		tx_desc_ring_vp;
6083dec9fcdSqs 	p_tx_desc_t		tx_desc_p;
6093dec9fcdSqs 	p_tx_desc_t		tx_desc_pp;
6103dec9fcdSqs 	tx_desc_t		r_tx_desc;
6113dec9fcdSqs 	p_tx_msg_t		tx_msg_ring;
6123dec9fcdSqs 	p_tx_msg_t		tx_msg_p;
6133dec9fcdSqs 	hpi_handle_t		handle;
6143dec9fcdSqs 	tdc_tdr_head_t		tx_head;
6153dec9fcdSqs 	uint32_t		pkt_len;
6163dec9fcdSqs 	uint_t			tx_rd_index;
6173dec9fcdSqs 	uint16_t		head_index, tail_index;
6183dec9fcdSqs 	uint8_t			tdc;
6193dec9fcdSqs 	boolean_t		head_wrap, tail_wrap;
6203dec9fcdSqs 	p_hxge_tx_ring_stats_t	tdc_stats;
6213dec9fcdSqs 	tdc_byte_cnt_t		byte_cnt;
6223dec9fcdSqs 	tdc_tdr_qlen_t		qlen;
6233dec9fcdSqs 	int			rc;
6243dec9fcdSqs 
6253dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_reclaim"));
6263dec9fcdSqs 
6273dec9fcdSqs 	status = ((tx_ring_p->descs_pending < hxge_reclaim_pending) &&
6283dec9fcdSqs 	    (nmblks != 0));
6293dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL,
6303dec9fcdSqs 	    "==> hxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
6313dec9fcdSqs 	    tx_ring_p->descs_pending, hxge_reclaim_pending, nmblks));
6323dec9fcdSqs 
6333dec9fcdSqs 	if (!status) {
6343dec9fcdSqs 		tx_desc_dma_p = &tx_ring_p->tdc_desc;
6353dec9fcdSqs 		desc_area = tx_ring_p->tdc_desc;
6363dec9fcdSqs 		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
6373dec9fcdSqs 		tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
6383dec9fcdSqs 		tx_rd_index = tx_ring_p->rd_index;
6393dec9fcdSqs 		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
6403dec9fcdSqs 		tx_msg_ring = tx_ring_p->tx_msg_ring;
6413dec9fcdSqs 		tx_msg_p = &tx_msg_ring[tx_rd_index];
6423dec9fcdSqs 		tdc = tx_ring_p->tdc;
6433dec9fcdSqs 		tdc_stats = tx_ring_p->tdc_stats;
6443dec9fcdSqs 		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
6453dec9fcdSqs 			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
6463dec9fcdSqs 		}
6473dec9fcdSqs 		tail_index = tx_ring_p->wr_index;
6483dec9fcdSqs 		tail_wrap = tx_ring_p->wr_index_wrap;
6493dec9fcdSqs 
6503dec9fcdSqs 		/*
6513dec9fcdSqs 		 * tdc_byte_cnt reg can be used to get bytes transmitted. It
6523dec9fcdSqs 		 * includes padding too in case of runt packets.
6533dec9fcdSqs 		 */
6543dec9fcdSqs 		handle = HXGE_DEV_HPI_HANDLE(hxgep);
6553dec9fcdSqs 		TXDMA_REG_READ64(handle, TDC_BYTE_CNT, tdc, &byte_cnt.value);
6563dec9fcdSqs 		tdc_stats->obytes_with_pad += byte_cnt.bits.byte_count;
6573dec9fcdSqs 
6583dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
6593dec9fcdSqs 		    "==> hxge_txdma_reclaim: tdc %d tx_rd_index %d "
6603dec9fcdSqs 		    "tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ",
6613dec9fcdSqs 		    tdc, tx_rd_index, tail_index, tail_wrap,
6623dec9fcdSqs 		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
6633dec9fcdSqs 
6643dec9fcdSqs 		/*
6653dec9fcdSqs 		 * Read the hardware maintained transmit head and wrap around
6663dec9fcdSqs 		 * bit.
6673dec9fcdSqs 		 */
6683dec9fcdSqs 		TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc, &tx_head.value);
6693dec9fcdSqs 		head_index = tx_head.bits.head;
6703dec9fcdSqs 		head_wrap = tx_head.bits.wrap;
6713dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
6723dec9fcdSqs 		    "==> hxge_txdma_reclaim: "
6733dec9fcdSqs 		    "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
6743dec9fcdSqs 		    tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));
6753dec9fcdSqs 
6763dec9fcdSqs 		/*
6773dec9fcdSqs 		 * For debug only. This can be used to verify the qlen and make
6783dec9fcdSqs 		 * sure the hardware is wrapping the Tdr correctly.
6793dec9fcdSqs 		 */
6803dec9fcdSqs 		TXDMA_REG_READ64(handle, TDC_TDR_QLEN, tdc, &qlen.value);
6813dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
6823dec9fcdSqs 		    "==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d",
6833dec9fcdSqs 		    qlen.bits.tdr_qlen, qlen.bits.tdr_pref_qlen));
6843dec9fcdSqs 
6853dec9fcdSqs 		if (head_index == tail_index) {
6863dec9fcdSqs 			if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index,
6873dec9fcdSqs 			    tail_wrap) && (head_index == tx_rd_index)) {
6883dec9fcdSqs 				HXGE_DEBUG_MSG((hxgep, TX_CTL,
6893dec9fcdSqs 				    "==> hxge_txdma_reclaim: EMPTY"));
6903dec9fcdSqs 				return (B_TRUE);
6913dec9fcdSqs 			}
6923dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
6933dec9fcdSqs 			    "==> hxge_txdma_reclaim: Checking if ring full"));
6943dec9fcdSqs 			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
6953dec9fcdSqs 			    tail_wrap)) {
6963dec9fcdSqs 				HXGE_DEBUG_MSG((hxgep, TX_CTL,
6973dec9fcdSqs 				    "==> hxge_txdma_reclaim: full"));
6983dec9fcdSqs 				return (B_FALSE);
6993dec9fcdSqs 			}
7003dec9fcdSqs 		}
7013dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
7023dec9fcdSqs 		    "==> hxge_txdma_reclaim: tx_rd_index and head_index"));
7033dec9fcdSqs 
7043dec9fcdSqs 		/* XXXX: limit the # of reclaims */
7053dec9fcdSqs 		tx_desc_pp = &r_tx_desc;
7063dec9fcdSqs 		while ((tx_rd_index != head_index) &&
7073dec9fcdSqs 		    (tx_ring_p->descs_pending != 0)) {
7083dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
7093dec9fcdSqs 			    "==> hxge_txdma_reclaim: Checking if pending"));
7103dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
7113dec9fcdSqs 			    "==> hxge_txdma_reclaim: descs_pending %d ",
7123dec9fcdSqs 			    tx_ring_p->descs_pending));
7133dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
7143dec9fcdSqs 			    "==> hxge_txdma_reclaim: "
7153dec9fcdSqs 			    "(tx_rd_index %d head_index %d (tx_desc_p $%p)",
7163dec9fcdSqs 			    tx_rd_index, head_index, tx_desc_p));
7173dec9fcdSqs 
7183dec9fcdSqs 			tx_desc_pp->value = tx_desc_p->value;
7193dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
7203dec9fcdSqs 			    "==> hxge_txdma_reclaim: "
7213dec9fcdSqs 			    "(tx_rd_index %d head_index %d "
7223dec9fcdSqs 			    "tx_desc_p $%p (desc value 0x%llx) ",
7233dec9fcdSqs 			    tx_rd_index, head_index,
7243dec9fcdSqs 			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
7253dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
7263dec9fcdSqs 			    "==> hxge_txdma_reclaim: dump desc:"));
7273dec9fcdSqs 
7283dec9fcdSqs 			/*
7293dec9fcdSqs 			 * tdc_byte_cnt reg can be used to get bytes
7303dec9fcdSqs 			 * transmitted
7313dec9fcdSqs 			 */
7323dec9fcdSqs 			pkt_len = tx_desc_pp->bits.tr_len;
7333dec9fcdSqs 			tdc_stats->obytes += pkt_len;
7343dec9fcdSqs 			tdc_stats->opackets += tx_desc_pp->bits.sop;
7353dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
7363dec9fcdSqs 			    "==> hxge_txdma_reclaim: pkt_len %d "
7373dec9fcdSqs 			    "tdc channel %d opackets %d",
7383dec9fcdSqs 			    pkt_len, tdc, tdc_stats->opackets));
7393dec9fcdSqs 
7403dec9fcdSqs 			if (tx_msg_p->flags.dma_type == USE_DVMA) {
7413dec9fcdSqs 				HXGE_DEBUG_MSG((hxgep, TX_CTL,
7423dec9fcdSqs 				    "tx_desc_p = $%p tx_desc_pp = $%p "
7433dec9fcdSqs 				    "index = %d",
7443dec9fcdSqs 				    tx_desc_p, tx_desc_pp,
7453dec9fcdSqs 				    tx_ring_p->rd_index));
7463dec9fcdSqs 				(void) dvma_unload(tx_msg_p->dvma_handle,
7473dec9fcdSqs 				    0, -1);
7483dec9fcdSqs 				tx_msg_p->dvma_handle = NULL;
7493dec9fcdSqs 				if (tx_ring_p->dvma_wr_index ==
7503dec9fcdSqs 				    tx_ring_p->dvma_wrap_mask) {
7513dec9fcdSqs 					tx_ring_p->dvma_wr_index = 0;
7523dec9fcdSqs 				} else {
7533dec9fcdSqs 					tx_ring_p->dvma_wr_index++;
7543dec9fcdSqs 				}
7553dec9fcdSqs 				tx_ring_p->dvma_pending--;
7563dec9fcdSqs 			} else if (tx_msg_p->flags.dma_type == USE_DMA) {
7573dec9fcdSqs 				HXGE_DEBUG_MSG((hxgep, TX_CTL,
7583dec9fcdSqs 				    "==> hxge_txdma_reclaim: USE DMA"));
7593dec9fcdSqs 				if (rc = ddi_dma_unbind_handle
7603dec9fcdSqs 				    (tx_msg_p->dma_handle)) {
7613dec9fcdSqs 					cmn_err(CE_WARN, "hxge_reclaim: "
7623dec9fcdSqs 					    "ddi_dma_unbind_handle "
7633dec9fcdSqs 					    "failed. status %d", rc);
7643dec9fcdSqs 				}
7653dec9fcdSqs 			}
7663dec9fcdSqs 
7673dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
7683dec9fcdSqs 			    "==> hxge_txdma_reclaim: count packets"));
7693dec9fcdSqs 
7703dec9fcdSqs 			/*
7713dec9fcdSqs 			 * count a chained packet only once.
7723dec9fcdSqs 			 */
7733dec9fcdSqs 			if (tx_msg_p->tx_message != NULL) {
7743dec9fcdSqs 				freemsg(tx_msg_p->tx_message);
7753dec9fcdSqs 				tx_msg_p->tx_message = NULL;
7763dec9fcdSqs 			}
7773dec9fcdSqs 			tx_msg_p->flags.dma_type = USE_NONE;
7783dec9fcdSqs 			tx_rd_index = tx_ring_p->rd_index;
7793dec9fcdSqs 			tx_rd_index = (tx_rd_index + 1) &
7803dec9fcdSqs 			    tx_ring_p->tx_wrap_mask;
7813dec9fcdSqs 			tx_ring_p->rd_index = tx_rd_index;
7823dec9fcdSqs 			tx_ring_p->descs_pending--;
7833dec9fcdSqs 			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
7843dec9fcdSqs 			tx_msg_p = &tx_msg_ring[tx_rd_index];
7853dec9fcdSqs 		}
7863dec9fcdSqs 
7871ed83081SMichael Speer 		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
7881ed83081SMichael Speer 		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
7893dec9fcdSqs 		if (status) {
79075d94465SJosef 'Jeff' Sipek 			(void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
79175d94465SJosef 'Jeff' Sipek 			    1, 0);
7923dec9fcdSqs 		}
7933dec9fcdSqs 	} else {
7941ed83081SMichael Speer 		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
7951ed83081SMichael Speer 		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
7963dec9fcdSqs 	}
7973dec9fcdSqs 
7983dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL,
7993dec9fcdSqs 	    "<== hxge_txdma_reclaim status = 0x%08x", status));
8003dec9fcdSqs 	return (status);
8013dec9fcdSqs }
8023dec9fcdSqs 
8033dec9fcdSqs uint_t
hxge_tx_intr(caddr_t arg1,caddr_t arg2)8043dec9fcdSqs hxge_tx_intr(caddr_t arg1, caddr_t arg2)
8053dec9fcdSqs {
8063dec9fcdSqs 	p_hxge_ldv_t	ldvp = (p_hxge_ldv_t)arg1;
8073dec9fcdSqs 	p_hxge_t	hxgep = (p_hxge_t)arg2;
8083dec9fcdSqs 	p_hxge_ldg_t	ldgp;
8093dec9fcdSqs 	uint8_t		channel;
8103dec9fcdSqs 	uint32_t	vindex;
8113dec9fcdSqs 	hpi_handle_t	handle;
8123dec9fcdSqs 	tdc_stat_t	cs;
8133dec9fcdSqs 	p_tx_ring_t	*tx_rings;
8143dec9fcdSqs 	p_tx_ring_t	tx_ring_p;
8153dec9fcdSqs 	hpi_status_t	rs = HPI_SUCCESS;
8163dec9fcdSqs 	uint_t		serviced = DDI_INTR_UNCLAIMED;
8173dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
8183dec9fcdSqs 
8193dec9fcdSqs 	if (ldvp == NULL) {
8203dec9fcdSqs 		HXGE_DEBUG_MSG((NULL, INT_CTL,
8213dec9fcdSqs 		    "<== hxge_tx_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
8223dec9fcdSqs 		return (DDI_INTR_UNCLAIMED);
8233dec9fcdSqs 	}
8243dec9fcdSqs 
8253dec9fcdSqs 	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
8263dec9fcdSqs 		hxgep = ldvp->hxgep;
8273dec9fcdSqs 	}
8283dec9fcdSqs 
829dc10a9c2SMichael Speer 	/*
830dc10a9c2SMichael Speer 	 * If the interface is not started, just swallow the interrupt
831dc10a9c2SMichael Speer 	 * and don't rearm the logical device.
832dc10a9c2SMichael Speer 	 */
833dc10a9c2SMichael Speer 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED)
834dc10a9c2SMichael Speer 		return (DDI_INTR_CLAIMED);
835dc10a9c2SMichael Speer 
8363dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
8373dec9fcdSqs 	    "==> hxge_tx_intr: hxgep(arg2) $%p ldvp(arg1) $%p", hxgep, ldvp));
8383dec9fcdSqs 
8393dec9fcdSqs 	/*
8403dec9fcdSqs 	 * This interrupt handler is for a specific transmit dma channel.
8413dec9fcdSqs 	 */
8423dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
8433dec9fcdSqs 
8443dec9fcdSqs 	/* Get the control and status for this channel. */
8453dec9fcdSqs 	channel = ldvp->channel;
8463dec9fcdSqs 	ldgp = ldvp->ldgp;
8473dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
8483dec9fcdSqs 	    "==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d",
8493dec9fcdSqs 	    hxgep, ldvp, channel));
8503dec9fcdSqs 
8513dec9fcdSqs 	rs = hpi_txdma_control_status(handle, OP_GET, channel, &cs);
8523dec9fcdSqs 	vindex = ldvp->vdma_index;
8533dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
8543dec9fcdSqs 	    "==> hxge_tx_intr:channel %d ring index %d status 0x%08x",
8553dec9fcdSqs 	    channel, vindex, rs));
8563dec9fcdSqs 
8573dec9fcdSqs 	if (!rs && cs.bits.marked) {
8583dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
8593dec9fcdSqs 		    "==> hxge_tx_intr:channel %d ring index %d "
8603dec9fcdSqs 		    "status 0x%08x (marked bit set)", channel, vindex, rs));
8613dec9fcdSqs 		tx_rings = hxgep->tx_rings->rings;
8623dec9fcdSqs 		tx_ring_p = tx_rings[vindex];
8633dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
8643dec9fcdSqs 		    "==> hxge_tx_intr:channel %d ring index %d "
8653dec9fcdSqs 		    "status 0x%08x (marked bit set, calling reclaim)",
8663dec9fcdSqs 		    channel, vindex, rs));
8673dec9fcdSqs 
8683dec9fcdSqs 		MUTEX_ENTER(&tx_ring_p->lock);
8693dec9fcdSqs 		(void) hxge_txdma_reclaim(hxgep, tx_rings[vindex], 0);
8703dec9fcdSqs 		MUTEX_EXIT(&tx_ring_p->lock);
8713dec9fcdSqs 		mac_tx_update(hxgep->mach);
8723dec9fcdSqs 	}
8733dec9fcdSqs 
8743dec9fcdSqs 	/*
8753dec9fcdSqs 	 * Process other transmit control and status. Check the ldv state.
8763dec9fcdSqs 	 */
8773dec9fcdSqs 	status = hxge_tx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
8783dec9fcdSqs 
8793dec9fcdSqs 	/* Clear the error bits */
8803dec9fcdSqs 	RXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value);
8813dec9fcdSqs 
8823dec9fcdSqs 	/*
8833dec9fcdSqs 	 * Rearm this logical group if this is a single device group.
8843dec9fcdSqs 	 */
8853dec9fcdSqs 	if (ldgp->nldvs == 1) {
8863dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_tx_intr: rearm"));
8873dec9fcdSqs 		if (status == HXGE_OK) {
8883dec9fcdSqs 			(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
8893dec9fcdSqs 			    B_TRUE, ldgp->ldg_timer);
8903dec9fcdSqs 		}
8913dec9fcdSqs 	}
8923dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_tx_intr"));
8933dec9fcdSqs 	serviced = DDI_INTR_CLAIMED;
8943dec9fcdSqs 	return (serviced);
8953dec9fcdSqs }
8963dec9fcdSqs 
8973dec9fcdSqs void
hxge_txdma_stop(p_hxge_t hxgep)8983dec9fcdSqs hxge_txdma_stop(p_hxge_t hxgep)
8993dec9fcdSqs {
9003dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop"));
9013dec9fcdSqs 
9023dec9fcdSqs 	(void) hxge_tx_vmac_disable(hxgep);
9033dec9fcdSqs 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
9043dec9fcdSqs 
9053dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop"));
9063dec9fcdSqs }
9073dec9fcdSqs 
9083dec9fcdSqs hxge_status_t
hxge_txdma_hw_mode(p_hxge_t hxgep,boolean_t enable)9093dec9fcdSqs hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
9103dec9fcdSqs {
9113dec9fcdSqs 	int		i, ndmas;
9123dec9fcdSqs 	uint16_t	channel;
9133dec9fcdSqs 	p_tx_rings_t	tx_rings;
9143dec9fcdSqs 	p_tx_ring_t	*tx_desc_rings;
9153dec9fcdSqs 	hpi_handle_t	handle;
9163dec9fcdSqs 	hpi_status_t	rs = HPI_SUCCESS;
9173dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
9183dec9fcdSqs 
9193dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
9203dec9fcdSqs 	    "==> hxge_txdma_hw_mode: enable mode %d", enable));
9213dec9fcdSqs 
9223dec9fcdSqs 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
9233dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
9243dec9fcdSqs 		    "<== hxge_txdma_mode: not initialized"));
9253dec9fcdSqs 		return (HXGE_ERROR);
9263dec9fcdSqs 	}
9273dec9fcdSqs 	tx_rings = hxgep->tx_rings;
9283dec9fcdSqs 	if (tx_rings == NULL) {
9293dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
9303dec9fcdSqs 		    "<== hxge_txdma_hw_mode: NULL global ring pointer"));
9313dec9fcdSqs 		return (HXGE_ERROR);
9323dec9fcdSqs 	}
9333dec9fcdSqs 	tx_desc_rings = tx_rings->rings;
9343dec9fcdSqs 	if (tx_desc_rings == NULL) {
9353dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
9363dec9fcdSqs 		    "<== hxge_txdma_hw_mode: NULL rings pointer"));
9373dec9fcdSqs 		return (HXGE_ERROR);
9383dec9fcdSqs 	}
9393dec9fcdSqs 	ndmas = tx_rings->ndmas;
9403dec9fcdSqs 	if (!ndmas) {
9413dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
9423dec9fcdSqs 		    "<== hxge_txdma_hw_mode: no dma channel allocated"));
9433dec9fcdSqs 		return (HXGE_ERROR);
9443dec9fcdSqs 	}
9453dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_mode: "
9463dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
9473dec9fcdSqs 	    tx_rings, tx_desc_rings, ndmas));
9483dec9fcdSqs 
9493dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
9503dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
9513dec9fcdSqs 		if (tx_desc_rings[i] == NULL) {
9523dec9fcdSqs 			continue;
9533dec9fcdSqs 		}
9543dec9fcdSqs 		channel = tx_desc_rings[i]->tdc;
9553dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
9563dec9fcdSqs 		    "==> hxge_txdma_hw_mode: channel %d", channel));
9573dec9fcdSqs 		if (enable) {
9583dec9fcdSqs 			rs = hpi_txdma_channel_enable(handle, channel);
9593dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
9603dec9fcdSqs 			    "==> hxge_txdma_hw_mode: channel %d (enable) "
9613dec9fcdSqs 			    "rs 0x%x", channel, rs));
9623dec9fcdSqs 		} else {
9633dec9fcdSqs 			/*
9643dec9fcdSqs 			 * Stop the dma channel and waits for the stop done. If
9653dec9fcdSqs 			 * the stop done bit is not set, then force an error so
9663dec9fcdSqs 			 * TXC will stop. All channels bound to this port need
9673dec9fcdSqs 			 * to be stopped and reset after injecting an interrupt
9683dec9fcdSqs 			 * error.
9693dec9fcdSqs 			 */
9703dec9fcdSqs 			rs = hpi_txdma_channel_disable(handle, channel);
9713dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
9723dec9fcdSqs 			    "==> hxge_txdma_hw_mode: channel %d (disable) "
9733dec9fcdSqs 			    "rs 0x%x", channel, rs));
9743dec9fcdSqs 		}
9753dec9fcdSqs 	}
9763dec9fcdSqs 
9773dec9fcdSqs 	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
9783dec9fcdSqs 
9793dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
9803dec9fcdSqs 	    "<== hxge_txdma_hw_mode: status 0x%x", status));
9813dec9fcdSqs 
9823dec9fcdSqs 	return (status);
9833dec9fcdSqs }
9843dec9fcdSqs 
9853dec9fcdSqs void
hxge_txdma_enable_channel(p_hxge_t hxgep,uint16_t channel)9863dec9fcdSqs hxge_txdma_enable_channel(p_hxge_t hxgep, uint16_t channel)
9873dec9fcdSqs {
9883dec9fcdSqs 	hpi_handle_t handle;
9893dec9fcdSqs 
9903dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
9913dec9fcdSqs 	    "==> hxge_txdma_enable_channel: channel %d", channel));
9923dec9fcdSqs 
9933dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
9943dec9fcdSqs 	/* enable the transmit dma channels */
9953dec9fcdSqs 	(void) hpi_txdma_channel_enable(handle, channel);
9963dec9fcdSqs 
9973dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_txdma_enable_channel"));
9983dec9fcdSqs }
9993dec9fcdSqs 
10003dec9fcdSqs void
hxge_txdma_disable_channel(p_hxge_t hxgep,uint16_t channel)10013dec9fcdSqs hxge_txdma_disable_channel(p_hxge_t hxgep, uint16_t channel)
10023dec9fcdSqs {
10033dec9fcdSqs 	hpi_handle_t handle;
10043dec9fcdSqs 
10053dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
10063dec9fcdSqs 	    "==> hxge_txdma_disable_channel: channel %d", channel));
10073dec9fcdSqs 
10083dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
10093dec9fcdSqs 	/* stop the transmit dma channels */
10103dec9fcdSqs 	(void) hpi_txdma_channel_disable(handle, channel);
10113dec9fcdSqs 
10123dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_disable_channel"));
10133dec9fcdSqs }
10143dec9fcdSqs 
10153dec9fcdSqs int
hxge_txdma_stop_inj_err(p_hxge_t hxgep,int channel)10163dec9fcdSqs hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel)
10173dec9fcdSqs {
10183dec9fcdSqs 	hpi_handle_t	handle;
10193dec9fcdSqs 	int		status;
10203dec9fcdSqs 	hpi_status_t	rs = HPI_SUCCESS;
10213dec9fcdSqs 
10223dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop_inj_err"));
10233dec9fcdSqs 
10243dec9fcdSqs 	/*
10253dec9fcdSqs 	 * Stop the dma channel waits for the stop done. If the stop done bit
10263dec9fcdSqs 	 * is not set, then create an error.
10273dec9fcdSqs 	 */
10283dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
10293dec9fcdSqs 	rs = hpi_txdma_channel_disable(handle, channel);
10303dec9fcdSqs 	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
10313dec9fcdSqs 	if (status == HXGE_OK) {
10323dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
10333dec9fcdSqs 		    "<== hxge_txdma_stop_inj_err (channel %d): "
10343dec9fcdSqs 		    "stopped OK", channel));
10353dec9fcdSqs 		return (status);
10363dec9fcdSqs 	}
10373dec9fcdSqs 
10383dec9fcdSqs 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
10393dec9fcdSqs 	    "==> hxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
10403dec9fcdSqs 	    " (injected error but still not stopped)", channel, rs));
10413dec9fcdSqs 
10423dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop_inj_err"));
10433dec9fcdSqs 
10443dec9fcdSqs 	return (status);
10453dec9fcdSqs }
10463dec9fcdSqs 
10473dec9fcdSqs /*ARGSUSED*/
10483dec9fcdSqs void
hxge_fixup_txdma_rings(p_hxge_t hxgep)10493dec9fcdSqs hxge_fixup_txdma_rings(p_hxge_t hxgep)
10503dec9fcdSqs {
10513dec9fcdSqs 	int		index, ndmas;
10523dec9fcdSqs 	uint16_t	channel;
10533dec9fcdSqs 	p_tx_rings_t	tx_rings;
10543dec9fcdSqs 
10553dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_txdma_rings"));
10563dec9fcdSqs 
10573dec9fcdSqs 	/*
10583dec9fcdSqs 	 * For each transmit channel, reclaim each descriptor and free buffers.
10593dec9fcdSqs 	 */
10603dec9fcdSqs 	tx_rings = hxgep->tx_rings;
10613dec9fcdSqs 	if (tx_rings == NULL) {
10623dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
10633dec9fcdSqs 		    "<== hxge_fixup_txdma_rings: NULL ring pointer"));
10643dec9fcdSqs 		return;
10653dec9fcdSqs 	}
10663dec9fcdSqs 
10673dec9fcdSqs 	ndmas = tx_rings->ndmas;
10683dec9fcdSqs 	if (!ndmas) {
10693dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
10703dec9fcdSqs 		    "<== hxge_fixup_txdma_rings: no channel allocated"));
10713dec9fcdSqs 		return;
10723dec9fcdSqs 	}
10733dec9fcdSqs 
10743dec9fcdSqs 	if (tx_rings->rings == NULL) {
10753dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
10763dec9fcdSqs 		    "<== hxge_fixup_txdma_rings: NULL rings pointer"));
10773dec9fcdSqs 		return;
10783dec9fcdSqs 	}
10793dec9fcdSqs 
10803dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_fixup_txdma_rings: "
10813dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
10823dec9fcdSqs 	    tx_rings, tx_rings->rings, ndmas));
10833dec9fcdSqs 
10843dec9fcdSqs 	for (index = 0; index < ndmas; index++) {
10853dec9fcdSqs 		channel = tx_rings->rings[index]->tdc;
10863dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
10873dec9fcdSqs 		    "==> hxge_fixup_txdma_rings: channel %d", channel));
10883dec9fcdSqs 		hxge_txdma_fixup_channel(hxgep, tx_rings->rings[index],
10893dec9fcdSqs 		    channel);
10903dec9fcdSqs 	}
10913dec9fcdSqs 
10923dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_txdma_rings"));
10933dec9fcdSqs }
10943dec9fcdSqs 
10953dec9fcdSqs /*ARGSUSED*/
10963dec9fcdSqs void
hxge_txdma_fix_channel(p_hxge_t hxgep,uint16_t channel)10973dec9fcdSqs hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel)
10983dec9fcdSqs {
10993dec9fcdSqs 	p_tx_ring_t ring_p;
11003dec9fcdSqs 
11013dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_channel"));
11023dec9fcdSqs 
11033dec9fcdSqs 	ring_p = hxge_txdma_get_ring(hxgep, channel);
11043dec9fcdSqs 	if (ring_p == NULL) {
11053dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
11063dec9fcdSqs 		return;
11073dec9fcdSqs 	}
11083dec9fcdSqs 
11093dec9fcdSqs 	if (ring_p->tdc != channel) {
11103dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
11113dec9fcdSqs 		    "<== hxge_txdma_fix_channel: channel not matched "
11123dec9fcdSqs 		    "ring tdc %d passed channel", ring_p->tdc, channel));
11133dec9fcdSqs 		return;
11143dec9fcdSqs 	}
11153dec9fcdSqs 
11163dec9fcdSqs 	hxge_txdma_fixup_channel(hxgep, ring_p, channel);
11173dec9fcdSqs 
11183dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
11193dec9fcdSqs }
11203dec9fcdSqs 
11213dec9fcdSqs /*ARGSUSED*/
11223dec9fcdSqs void
hxge_txdma_fixup_channel(p_hxge_t hxgep,p_tx_ring_t ring_p,uint16_t channel)11233dec9fcdSqs hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
11243dec9fcdSqs {
11253dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_channel"));
11263dec9fcdSqs 
11273dec9fcdSqs 	if (ring_p == NULL) {
11283dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
11293dec9fcdSqs 		    "<== hxge_txdma_fixup_channel: NULL ring pointer"));
11303dec9fcdSqs 		return;
11313dec9fcdSqs 	}
11323dec9fcdSqs 	if (ring_p->tdc != channel) {
11333dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
11343dec9fcdSqs 		    "<== hxge_txdma_fixup_channel: channel not matched "
11353dec9fcdSqs 		    "ring tdc %d passed channel", ring_p->tdc, channel));
11363dec9fcdSqs 		return;
11373dec9fcdSqs 	}
11383dec9fcdSqs 	MUTEX_ENTER(&ring_p->lock);
11393dec9fcdSqs 	(void) hxge_txdma_reclaim(hxgep, ring_p, 0);
11403dec9fcdSqs 
11413dec9fcdSqs 	ring_p->rd_index = 0;
11423dec9fcdSqs 	ring_p->wr_index = 0;
11433dec9fcdSqs 	ring_p->ring_head.value = 0;
11443dec9fcdSqs 	ring_p->ring_kick_tail.value = 0;
11453dec9fcdSqs 	ring_p->descs_pending = 0;
11463dec9fcdSqs 	MUTEX_EXIT(&ring_p->lock);
11473dec9fcdSqs 
11483dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_channel"));
11493dec9fcdSqs }
11503dec9fcdSqs 
11513dec9fcdSqs /*ARGSUSED*/
11523dec9fcdSqs void
hxge_txdma_hw_kick(p_hxge_t hxgep)11533dec9fcdSqs hxge_txdma_hw_kick(p_hxge_t hxgep)
11543dec9fcdSqs {
11553dec9fcdSqs 	int		index, ndmas;
11563dec9fcdSqs 	uint16_t	channel;
11573dec9fcdSqs 	p_tx_rings_t	tx_rings;
11583dec9fcdSqs 
11593dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick"));
11603dec9fcdSqs 
11613dec9fcdSqs 	tx_rings = hxgep->tx_rings;
11623dec9fcdSqs 	if (tx_rings == NULL) {
11633dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
11643dec9fcdSqs 		    "<== hxge_txdma_hw_kick: NULL ring pointer"));
11653dec9fcdSqs 		return;
11663dec9fcdSqs 	}
11673dec9fcdSqs 	ndmas = tx_rings->ndmas;
11683dec9fcdSqs 	if (!ndmas) {
11693dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
11703dec9fcdSqs 		    "<== hxge_txdma_hw_kick: no channel allocated"));
11713dec9fcdSqs 		return;
11723dec9fcdSqs 	}
11733dec9fcdSqs 	if (tx_rings->rings == NULL) {
11743dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
11753dec9fcdSqs 		    "<== hxge_txdma_hw_kick: NULL rings pointer"));
11763dec9fcdSqs 		return;
11773dec9fcdSqs 	}
11783dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_kick: "
11793dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
11803dec9fcdSqs 	    tx_rings, tx_rings->rings, ndmas));
11813dec9fcdSqs 
11823dec9fcdSqs 	for (index = 0; index < ndmas; index++) {
11833dec9fcdSqs 		channel = tx_rings->rings[index]->tdc;
11843dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
11853dec9fcdSqs 		    "==> hxge_txdma_hw_kick: channel %d", channel));
11863dec9fcdSqs 		hxge_txdma_hw_kick_channel(hxgep, tx_rings->rings[index],
11873dec9fcdSqs 		    channel);
11883dec9fcdSqs 	}
11893dec9fcdSqs 
11903dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick"));
11913dec9fcdSqs }
11923dec9fcdSqs 
11933dec9fcdSqs /*ARGSUSED*/
11943dec9fcdSqs void
hxge_txdma_kick_channel(p_hxge_t hxgep,uint16_t channel)11953dec9fcdSqs hxge_txdma_kick_channel(p_hxge_t hxgep, uint16_t channel)
11963dec9fcdSqs {
11973dec9fcdSqs 	p_tx_ring_t ring_p;
11983dec9fcdSqs 
11993dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_kick_channel"));
12003dec9fcdSqs 
12013dec9fcdSqs 	ring_p = hxge_txdma_get_ring(hxgep, channel);
12023dec9fcdSqs 	if (ring_p == NULL) {
12033dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL, " hxge_txdma_kick_channel"));
12043dec9fcdSqs 		return;
12053dec9fcdSqs 	}
12063dec9fcdSqs 
12073dec9fcdSqs 	if (ring_p->tdc != channel) {
12083dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
12093dec9fcdSqs 		    "<== hxge_txdma_kick_channel: channel not matched "
12103dec9fcdSqs 		    "ring tdc %d passed channel", ring_p->tdc, channel));
12113dec9fcdSqs 		return;
12123dec9fcdSqs 	}
12133dec9fcdSqs 
12143dec9fcdSqs 	hxge_txdma_hw_kick_channel(hxgep, ring_p, channel);
12153dec9fcdSqs 
12163dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
12173dec9fcdSqs }
12183dec9fcdSqs 
12193dec9fcdSqs /*ARGSUSED*/
12203dec9fcdSqs void
hxge_txdma_hw_kick_channel(p_hxge_t hxgep,p_tx_ring_t ring_p,uint16_t channel)12213dec9fcdSqs hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
12223dec9fcdSqs {
12233dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick_channel"));
12243dec9fcdSqs 
12253dec9fcdSqs 	if (ring_p == NULL) {
12263dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
12273dec9fcdSqs 		    "<== hxge_txdma_hw_kick_channel: NULL ring pointer"));
12283dec9fcdSqs 		return;
12293dec9fcdSqs 	}
12303dec9fcdSqs 
12313dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick_channel"));
12323dec9fcdSqs }
12333dec9fcdSqs 
12343dec9fcdSqs /*ARGSUSED*/
12353dec9fcdSqs void
hxge_check_tx_hang(p_hxge_t hxgep)12363dec9fcdSqs hxge_check_tx_hang(p_hxge_t hxgep)
12373dec9fcdSqs {
12383dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_check_tx_hang"));
12393dec9fcdSqs 
12403dec9fcdSqs 	/*
12413dec9fcdSqs 	 * Needs inputs from hardware for regs: head index had not moved since
12423dec9fcdSqs 	 * last timeout. packets not transmitted or stuffed registers.
12433dec9fcdSqs 	 */
12443dec9fcdSqs 	if (hxge_txdma_hung(hxgep)) {
12453dec9fcdSqs 		hxge_fixup_hung_txdma_rings(hxgep);
12463dec9fcdSqs 	}
12473dec9fcdSqs 
12483dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_check_tx_hang"));
12493dec9fcdSqs }
12503dec9fcdSqs 
12513dec9fcdSqs int
hxge_txdma_hung(p_hxge_t hxgep)12523dec9fcdSqs hxge_txdma_hung(p_hxge_t hxgep)
12533dec9fcdSqs {
12543dec9fcdSqs 	int		index, ndmas;
12553dec9fcdSqs 	uint16_t	channel;
12563dec9fcdSqs 	p_tx_rings_t	tx_rings;
12573dec9fcdSqs 	p_tx_ring_t	tx_ring_p;
12583dec9fcdSqs 
12593dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hung"));
12603dec9fcdSqs 
12613dec9fcdSqs 	tx_rings = hxgep->tx_rings;
12623dec9fcdSqs 	if (tx_rings == NULL) {
12633dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
12643dec9fcdSqs 		    "<== hxge_txdma_hung: NULL ring pointer"));
12653dec9fcdSqs 		return (B_FALSE);
12663dec9fcdSqs 	}
12673dec9fcdSqs 
12683dec9fcdSqs 	ndmas = tx_rings->ndmas;
12693dec9fcdSqs 	if (!ndmas) {
12703dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
12713dec9fcdSqs 		    "<== hxge_txdma_hung: no channel allocated"));
12723dec9fcdSqs 		return (B_FALSE);
12733dec9fcdSqs 	}
12743dec9fcdSqs 
12753dec9fcdSqs 	if (tx_rings->rings == NULL) {
12763dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
12773dec9fcdSqs 		    "<== hxge_txdma_hung: NULL rings pointer"));
12783dec9fcdSqs 		return (B_FALSE);
12793dec9fcdSqs 	}
12803dec9fcdSqs 
12813dec9fcdSqs 	for (index = 0; index < ndmas; index++) {
12823dec9fcdSqs 		channel = tx_rings->rings[index]->tdc;
12833dec9fcdSqs 		tx_ring_p = tx_rings->rings[index];
12843dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
12853dec9fcdSqs 		    "==> hxge_txdma_hung: channel %d", channel));
12863dec9fcdSqs 		if (hxge_txdma_channel_hung(hxgep, tx_ring_p, channel)) {
12873dec9fcdSqs 			return (B_TRUE);
12883dec9fcdSqs 		}
12893dec9fcdSqs 	}
12903dec9fcdSqs 
12913dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hung"));
12923dec9fcdSqs 
12933dec9fcdSqs 	return (B_FALSE);
12943dec9fcdSqs }
12953dec9fcdSqs 
12963dec9fcdSqs int
hxge_txdma_channel_hung(p_hxge_t hxgep,p_tx_ring_t tx_ring_p,uint16_t channel)12973dec9fcdSqs hxge_txdma_channel_hung(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
12983dec9fcdSqs {
12993dec9fcdSqs 	uint16_t	head_index, tail_index;
13003dec9fcdSqs 	boolean_t	head_wrap, tail_wrap;
13013dec9fcdSqs 	hpi_handle_t	handle;
13023dec9fcdSqs 	tdc_tdr_head_t	tx_head;
13033dec9fcdSqs 	uint_t		tx_rd_index;
13043dec9fcdSqs 
13053dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung"));
13063dec9fcdSqs 
13073dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
13083dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL,
13093dec9fcdSqs 	    "==> hxge_txdma_channel_hung: channel %d", channel));
13103dec9fcdSqs 	MUTEX_ENTER(&tx_ring_p->lock);
13113dec9fcdSqs 	(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
13123dec9fcdSqs 
13133dec9fcdSqs 	tail_index = tx_ring_p->wr_index;
13143dec9fcdSqs 	tail_wrap = tx_ring_p->wr_index_wrap;
13153dec9fcdSqs 	tx_rd_index = tx_ring_p->rd_index;
13163dec9fcdSqs 	MUTEX_EXIT(&tx_ring_p->lock);
13173dec9fcdSqs 
13183dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL,
13193dec9fcdSqs 	    "==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d "
13203dec9fcdSqs 	    "tail_index %d tail_wrap %d ",
13213dec9fcdSqs 	    channel, tx_rd_index, tail_index, tail_wrap));
13223dec9fcdSqs 	/*
13233dec9fcdSqs 	 * Read the hardware maintained transmit head and wrap around bit.
13243dec9fcdSqs 	 */
13253dec9fcdSqs 	(void) hpi_txdma_ring_head_get(handle, channel, &tx_head);
13263dec9fcdSqs 	head_index = tx_head.bits.head;
13273dec9fcdSqs 	head_wrap = tx_head.bits.wrap;
13283dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung: "
13293dec9fcdSqs 	    "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
13303dec9fcdSqs 	    tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));
13313dec9fcdSqs 
13323dec9fcdSqs 	if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, tail_wrap) &&
13333dec9fcdSqs 	    (head_index == tx_rd_index)) {
13343dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
13353dec9fcdSqs 		    "==> hxge_txdma_channel_hung: EMPTY"));
13363dec9fcdSqs 		return (B_FALSE);
13373dec9fcdSqs 	}
13383dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL,
13393dec9fcdSqs 	    "==> hxge_txdma_channel_hung: Checking if ring full"));
13403dec9fcdSqs 	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, tail_wrap)) {
13413dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
13423dec9fcdSqs 		    "==> hxge_txdma_channel_hung: full"));
13433dec9fcdSqs 		return (B_TRUE);
13443dec9fcdSqs 	}
13453dec9fcdSqs 
13463dec9fcdSqs 	/* If not full, check with hardware to see if it is hung */
13473dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_channel_hung"));
13483dec9fcdSqs 
13493dec9fcdSqs 	return (B_FALSE);
13503dec9fcdSqs }
13513dec9fcdSqs 
13523dec9fcdSqs /*ARGSUSED*/
13533dec9fcdSqs void
hxge_fixup_hung_txdma_rings(p_hxge_t hxgep)13543dec9fcdSqs hxge_fixup_hung_txdma_rings(p_hxge_t hxgep)
13553dec9fcdSqs {
13563dec9fcdSqs 	int		index, ndmas;
13573dec9fcdSqs 	uint16_t	channel;
13583dec9fcdSqs 	p_tx_rings_t	tx_rings;
13593dec9fcdSqs 
13603dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings"));
13613dec9fcdSqs 	tx_rings = hxgep->tx_rings;
13623dec9fcdSqs 	if (tx_rings == NULL) {
13633dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
13643dec9fcdSqs 		    "<== hxge_fixup_hung_txdma_rings: NULL ring pointer"));
13653dec9fcdSqs 		return;
13663dec9fcdSqs 	}
13673dec9fcdSqs 	ndmas = tx_rings->ndmas;
13683dec9fcdSqs 	if (!ndmas) {
13693dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
13703dec9fcdSqs 		    "<== hxge_fixup_hung_txdma_rings: no channel allocated"));
13713dec9fcdSqs 		return;
13723dec9fcdSqs 	}
13733dec9fcdSqs 	if (tx_rings->rings == NULL) {
13743dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
13753dec9fcdSqs 		    "<== hxge_fixup_hung_txdma_rings: NULL rings pointer"));
13763dec9fcdSqs 		return;
13773dec9fcdSqs 	}
13783dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings: "
13793dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
13803dec9fcdSqs 	    tx_rings, tx_rings->rings, ndmas));
13813dec9fcdSqs 
13823dec9fcdSqs 	for (index = 0; index < ndmas; index++) {
13833dec9fcdSqs 		channel = tx_rings->rings[index]->tdc;
13843dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
13853dec9fcdSqs 		    "==> hxge_fixup_hung_txdma_rings: channel %d", channel));
13863dec9fcdSqs 		hxge_txdma_fixup_hung_channel(hxgep, tx_rings->rings[index],
13873dec9fcdSqs 		    channel);
13883dec9fcdSqs 	}
13893dec9fcdSqs 
13903dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_hung_txdma_rings"));
13913dec9fcdSqs }
13923dec9fcdSqs 
13933dec9fcdSqs /*ARGSUSED*/
13943dec9fcdSqs void
hxge_txdma_fix_hung_channel(p_hxge_t hxgep,uint16_t channel)13953dec9fcdSqs hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel)
13963dec9fcdSqs {
13973dec9fcdSqs 	p_tx_ring_t ring_p;
13983dec9fcdSqs 
13993dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_hung_channel"));
14003dec9fcdSqs 	ring_p = hxge_txdma_get_ring(hxgep, channel);
14013dec9fcdSqs 	if (ring_p == NULL) {
14023dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14033dec9fcdSqs 		    "<== hxge_txdma_fix_hung_channel"));
14043dec9fcdSqs 		return;
14053dec9fcdSqs 	}
14063dec9fcdSqs 	if (ring_p->tdc != channel) {
14073dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14083dec9fcdSqs 		    "<== hxge_txdma_fix_hung_channel: channel not matched "
14093dec9fcdSqs 		    "ring tdc %d passed channel", ring_p->tdc, channel));
14103dec9fcdSqs 		return;
14113dec9fcdSqs 	}
14123dec9fcdSqs 	hxge_txdma_fixup_channel(hxgep, ring_p, channel);
14133dec9fcdSqs 
14143dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_hung_channel"));
14153dec9fcdSqs }
14163dec9fcdSqs 
14173dec9fcdSqs /*ARGSUSED*/
14183dec9fcdSqs void
hxge_txdma_fixup_hung_channel(p_hxge_t hxgep,p_tx_ring_t ring_p,uint16_t channel)14193dec9fcdSqs hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
14203dec9fcdSqs     uint16_t channel)
14213dec9fcdSqs {
14223dec9fcdSqs 	hpi_handle_t	handle;
14233dec9fcdSqs 	int		status = HXGE_OK;
14243dec9fcdSqs 
14253dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_hung_channel"));
14263dec9fcdSqs 
14273dec9fcdSqs 	if (ring_p == NULL) {
14283dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14293dec9fcdSqs 		    "<== hxge_txdma_fixup_hung_channel: NULL ring pointer"));
14303dec9fcdSqs 		return;
14313dec9fcdSqs 	}
14323dec9fcdSqs 	if (ring_p->tdc != channel) {
14333dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14343dec9fcdSqs 		    "<== hxge_txdma_fixup_hung_channel: channel "
14353dec9fcdSqs 		    "not matched ring tdc %d passed channel",
14363dec9fcdSqs 		    ring_p->tdc, channel));
14373dec9fcdSqs 		return;
14383dec9fcdSqs 	}
14393dec9fcdSqs 	/* Reclaim descriptors */
14403dec9fcdSqs 	MUTEX_ENTER(&ring_p->lock);
14413dec9fcdSqs 	(void) hxge_txdma_reclaim(hxgep, ring_p, 0);
14423dec9fcdSqs 	MUTEX_EXIT(&ring_p->lock);
14433dec9fcdSqs 
14443dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
14453dec9fcdSqs 	/*
14463dec9fcdSqs 	 * Stop the dma channel waits for the stop done. If the stop done bit
14473dec9fcdSqs 	 * is not set, then force an error.
14483dec9fcdSqs 	 */
14493dec9fcdSqs 	status = hpi_txdma_channel_disable(handle, channel);
14503dec9fcdSqs 	if (!(status & HPI_TXDMA_STOP_FAILED)) {
14513dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14523dec9fcdSqs 		    "<== hxge_txdma_fixup_hung_channel: stopped OK "
14533dec9fcdSqs 		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
14543dec9fcdSqs 		return;
14553dec9fcdSqs 	}
14563dec9fcdSqs 	/* Stop done bit will be set as a result of error injection */
14573dec9fcdSqs 	status = hpi_txdma_channel_disable(handle, channel);
14583dec9fcdSqs 	if (!(status & HPI_TXDMA_STOP_FAILED)) {
14593dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14603dec9fcdSqs 		    "<== hxge_txdma_fixup_hung_channel: stopped again"
14613dec9fcdSqs 		    "ring tdc %d passed channel", ring_p->tdc, channel));
14623dec9fcdSqs 		return;
14633dec9fcdSqs 	}
14643dec9fcdSqs 
14653dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL,
14663dec9fcdSqs 	    "<== hxge_txdma_fixup_hung_channel: stop done still not set!! "
14673dec9fcdSqs 	    "ring tdc %d passed channel", ring_p->tdc, channel));
14683dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_hung_channel"));
14693dec9fcdSqs }
14703dec9fcdSqs 
14713dec9fcdSqs /*ARGSUSED*/
14723dec9fcdSqs void
hxge_reclaim_rings(p_hxge_t hxgep)14733dec9fcdSqs hxge_reclaim_rings(p_hxge_t hxgep)
14743dec9fcdSqs {
14753dec9fcdSqs 	int		index, ndmas;
14763dec9fcdSqs 	uint16_t	channel;
14773dec9fcdSqs 	p_tx_rings_t	tx_rings;
14783dec9fcdSqs 	p_tx_ring_t	tx_ring_p;
14793dec9fcdSqs 
14803dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_ring"));
14813dec9fcdSqs 	tx_rings = hxgep->tx_rings;
14823dec9fcdSqs 	if (tx_rings == NULL) {
14833dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14843dec9fcdSqs 		    "<== hxge_reclain_rimgs: NULL ring pointer"));
14853dec9fcdSqs 		return;
14863dec9fcdSqs 	}
14873dec9fcdSqs 	ndmas = tx_rings->ndmas;
14883dec9fcdSqs 	if (!ndmas) {
14893dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14903dec9fcdSqs 		    "<== hxge_reclain_rimgs: no channel allocated"));
14913dec9fcdSqs 		return;
14923dec9fcdSqs 	}
14933dec9fcdSqs 	if (tx_rings->rings == NULL) {
14943dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
14953dec9fcdSqs 		    "<== hxge_reclain_rimgs: NULL rings pointer"));
14963dec9fcdSqs 		return;
14973dec9fcdSqs 	}
14983dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclain_rimgs: "
14993dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
15003dec9fcdSqs 	    tx_rings, tx_rings->rings, ndmas));
15013dec9fcdSqs 
15023dec9fcdSqs 	for (index = 0; index < ndmas; index++) {
15033dec9fcdSqs 		channel = tx_rings->rings[index]->tdc;
15043dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> reclain_rimgs: channel %d",
15053dec9fcdSqs 		    channel));
15063dec9fcdSqs 		tx_ring_p = tx_rings->rings[index];
15073dec9fcdSqs 		MUTEX_ENTER(&tx_ring_p->lock);
15083dec9fcdSqs 		(void) hxge_txdma_reclaim(hxgep, tx_ring_p, channel);
15093dec9fcdSqs 		MUTEX_EXIT(&tx_ring_p->lock);
15103dec9fcdSqs 	}
15113dec9fcdSqs 
15123dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reclaim_rings"));
15133dec9fcdSqs }
15143dec9fcdSqs 
15153dec9fcdSqs /*
15163dec9fcdSqs  * Static functions start here.
15173dec9fcdSqs  */
15183dec9fcdSqs static hxge_status_t
hxge_map_txdma(p_hxge_t hxgep)15193dec9fcdSqs hxge_map_txdma(p_hxge_t hxgep)
15203dec9fcdSqs {
15213dec9fcdSqs 	int			i, ndmas;
15223dec9fcdSqs 	uint16_t		channel;
15233dec9fcdSqs 	p_tx_rings_t		tx_rings;
15243dec9fcdSqs 	p_tx_ring_t		*tx_desc_rings;
15253dec9fcdSqs 	p_tx_mbox_areas_t	tx_mbox_areas_p;
15263dec9fcdSqs 	p_tx_mbox_t		*tx_mbox_p;
15273dec9fcdSqs 	p_hxge_dma_pool_t	dma_buf_poolp;
15283dec9fcdSqs 	p_hxge_dma_pool_t	dma_cntl_poolp;
15293dec9fcdSqs 	p_hxge_dma_common_t	*dma_buf_p;
15303dec9fcdSqs 	p_hxge_dma_common_t	*dma_cntl_p;
15313dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
15323dec9fcdSqs 
15333dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma"));
15343dec9fcdSqs 
15353dec9fcdSqs 	dma_buf_poolp = hxgep->tx_buf_pool_p;
15363dec9fcdSqs 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
15373dec9fcdSqs 
15383dec9fcdSqs 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
15393dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
15403dec9fcdSqs 		    "==> hxge_map_txdma: buf not allocated"));
15413dec9fcdSqs 		return (HXGE_ERROR);
15423dec9fcdSqs 	}
15433dec9fcdSqs 	ndmas = dma_buf_poolp->ndmas;
15443dec9fcdSqs 	if (!ndmas) {
15453dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
15463dec9fcdSqs 		    "<== hxge_map_txdma: no dma allocated"));
15473dec9fcdSqs 		return (HXGE_ERROR);
15483dec9fcdSqs 	}
15493dec9fcdSqs 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
15503dec9fcdSqs 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
15513dec9fcdSqs 
15523dec9fcdSqs 	tx_rings = (p_tx_rings_t)KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
15533dec9fcdSqs 	tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
15543dec9fcdSqs 	    sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);
15553dec9fcdSqs 
15563dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
15573dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
15583dec9fcdSqs 
15593dec9fcdSqs 	tx_mbox_areas_p = (p_tx_mbox_areas_t)
15603dec9fcdSqs 	    KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
15613dec9fcdSqs 	tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
15623dec9fcdSqs 	    sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);
15633dec9fcdSqs 
15643dec9fcdSqs 	/*
15653dec9fcdSqs 	 * Map descriptors from the buffer pools for each dma channel.
15663dec9fcdSqs 	 */
15673dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
15683dec9fcdSqs 		/*
15693dec9fcdSqs 		 * Set up and prepare buffer blocks, descriptors and mailbox.
15703dec9fcdSqs 		 */
15713dec9fcdSqs 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
15723dec9fcdSqs 		status = hxge_map_txdma_channel(hxgep, channel,
15733dec9fcdSqs 		    (p_hxge_dma_common_t *)&dma_buf_p[i],
15743dec9fcdSqs 		    (p_tx_ring_t *)&tx_desc_rings[i],
15753dec9fcdSqs 		    dma_buf_poolp->num_chunks[i],
15763dec9fcdSqs 		    (p_hxge_dma_common_t *)&dma_cntl_p[i],
15773dec9fcdSqs 		    (p_tx_mbox_t *)&tx_mbox_p[i]);
15783dec9fcdSqs 		if (status != HXGE_OK) {
15793dec9fcdSqs 			goto hxge_map_txdma_fail1;
15803dec9fcdSqs 		}
15813dec9fcdSqs 		tx_desc_rings[i]->index = (uint16_t)i;
15823dec9fcdSqs 		tx_desc_rings[i]->tdc_stats = &hxgep->statsp->tdc_stats[i];
15833dec9fcdSqs 	}
15843dec9fcdSqs 
15853dec9fcdSqs 	tx_rings->ndmas = ndmas;
15863dec9fcdSqs 	tx_rings->rings = tx_desc_rings;
15873dec9fcdSqs 	hxgep->tx_rings = tx_rings;
15883dec9fcdSqs 	tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
15893dec9fcdSqs 	hxgep->tx_mbox_areas_p = tx_mbox_areas_p;
15903dec9fcdSqs 
15913dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
15923dec9fcdSqs 	    "tx_rings $%p rings $%p", hxgep->tx_rings, hxgep->tx_rings->rings));
15933dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
15943dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p",
15953dec9fcdSqs 	    hxgep->tx_rings, tx_desc_rings));
15963dec9fcdSqs 
15973dec9fcdSqs 	goto hxge_map_txdma_exit;
15983dec9fcdSqs 
15993dec9fcdSqs hxge_map_txdma_fail1:
16003dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
16013dec9fcdSqs 	    "==> hxge_map_txdma: uninit tx desc "
16023dec9fcdSqs 	    "(status 0x%x channel %d i %d)", hxgep, status, channel, i));
16033dec9fcdSqs 	i--;
16043dec9fcdSqs 	for (; i >= 0; i--) {
16053dec9fcdSqs 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
16063dec9fcdSqs 		hxge_unmap_txdma_channel(hxgep, channel, tx_desc_rings[i],
16073dec9fcdSqs 		    tx_mbox_p[i]);
16083dec9fcdSqs 	}
16093dec9fcdSqs 
16103dec9fcdSqs 	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
16113dec9fcdSqs 	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
16123dec9fcdSqs 	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
16133dec9fcdSqs 	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
16143dec9fcdSqs 
16153dec9fcdSqs hxge_map_txdma_exit:
16163dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
16173dec9fcdSqs 	    "==> hxge_map_txdma: (status 0x%x channel %d)", status, channel));
16183dec9fcdSqs 
16193dec9fcdSqs 	return (status);
16203dec9fcdSqs }
16213dec9fcdSqs 
16223dec9fcdSqs static void
hxge_unmap_txdma(p_hxge_t hxgep)16233dec9fcdSqs hxge_unmap_txdma(p_hxge_t hxgep)
16243dec9fcdSqs {
16253dec9fcdSqs 	int			i, ndmas;
16263dec9fcdSqs 	uint8_t			channel;
16273dec9fcdSqs 	p_tx_rings_t		tx_rings;
16283dec9fcdSqs 	p_tx_ring_t		*tx_desc_rings;
16293dec9fcdSqs 	p_tx_mbox_areas_t	tx_mbox_areas_p;
16303dec9fcdSqs 	p_tx_mbox_t		*tx_mbox_p;
16313dec9fcdSqs 	p_hxge_dma_pool_t	dma_buf_poolp;
16323dec9fcdSqs 
16333dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma"));
16343dec9fcdSqs 
16353dec9fcdSqs 	dma_buf_poolp = hxgep->tx_buf_pool_p;
16363dec9fcdSqs 	if (!dma_buf_poolp->buf_allocated) {
16373dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
16383dec9fcdSqs 		    "==> hxge_unmap_txdma: buf not allocated"));
16393dec9fcdSqs 		return;
16403dec9fcdSqs 	}
16413dec9fcdSqs 	ndmas = dma_buf_poolp->ndmas;
16423dec9fcdSqs 	if (!ndmas) {
16433dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
16443dec9fcdSqs 		    "<== hxge_unmap_txdma: no dma allocated"));
16453dec9fcdSqs 		return;
16463dec9fcdSqs 	}
16473dec9fcdSqs 	tx_rings = hxgep->tx_rings;
16483dec9fcdSqs 	tx_desc_rings = tx_rings->rings;
16493dec9fcdSqs 	if (tx_rings == NULL) {
16503dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
16513dec9fcdSqs 		    "<== hxge_unmap_txdma: NULL ring pointer"));
16523dec9fcdSqs 		return;
16533dec9fcdSqs 	}
16543dec9fcdSqs 	tx_desc_rings = tx_rings->rings;
16553dec9fcdSqs 	if (tx_desc_rings == NULL) {
16563dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
16573dec9fcdSqs 		    "<== hxge_unmap_txdma: NULL ring pointers"));
16583dec9fcdSqs 		return;
16593dec9fcdSqs 	}
16603dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma: "
16613dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
16623dec9fcdSqs 	    tx_rings, tx_desc_rings, ndmas));
16633dec9fcdSqs 
16643dec9fcdSqs 	tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
16653dec9fcdSqs 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
16663dec9fcdSqs 
16673dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
16683dec9fcdSqs 		channel = tx_desc_rings[i]->tdc;
16693dec9fcdSqs 		(void) hxge_unmap_txdma_channel(hxgep, channel,
16703dec9fcdSqs 		    (p_tx_ring_t)tx_desc_rings[i],
16713dec9fcdSqs 		    (p_tx_mbox_t)tx_mbox_p[i]);
16723dec9fcdSqs 	}
16733dec9fcdSqs 
16743dec9fcdSqs 	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
16753dec9fcdSqs 	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
16763dec9fcdSqs 	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
16773dec9fcdSqs 	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
16783dec9fcdSqs 
16793dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma"));
16803dec9fcdSqs }
16813dec9fcdSqs 
16823dec9fcdSqs static hxge_status_t
hxge_map_txdma_channel(p_hxge_t hxgep,uint16_t channel,p_hxge_dma_common_t * dma_buf_p,p_tx_ring_t * tx_desc_p,uint32_t num_chunks,p_hxge_dma_common_t * dma_cntl_p,p_tx_mbox_t * tx_mbox_p)16833dec9fcdSqs hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
16843dec9fcdSqs     p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
16853dec9fcdSqs     uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
16863dec9fcdSqs     p_tx_mbox_t *tx_mbox_p)
16873dec9fcdSqs {
16883dec9fcdSqs 	int status = HXGE_OK;
16893dec9fcdSqs 
16903dec9fcdSqs 	/*
16913dec9fcdSqs 	 * Set up and prepare buffer blocks, descriptors and mailbox.
16923dec9fcdSqs 	 */
16933dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
16943dec9fcdSqs 	    "==> hxge_map_txdma_channel (channel %d)", channel));
16953dec9fcdSqs 
16963dec9fcdSqs 	/*
16973dec9fcdSqs 	 * Transmit buffer blocks
16983dec9fcdSqs 	 */
16993dec9fcdSqs 	status = hxge_map_txdma_channel_buf_ring(hxgep, channel,
17003dec9fcdSqs 	    dma_buf_p, tx_desc_p, num_chunks);
17013dec9fcdSqs 	if (status != HXGE_OK) {
17023dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
17033dec9fcdSqs 		    "==> hxge_map_txdma_channel (channel %d): "
17043dec9fcdSqs 		    "map buffer failed 0x%x", channel, status));
17053dec9fcdSqs 		goto hxge_map_txdma_channel_exit;
17063dec9fcdSqs 	}
17073dec9fcdSqs 	/*
17083dec9fcdSqs 	 * Transmit block ring, and mailbox.
17093dec9fcdSqs 	 */
17103dec9fcdSqs 	hxge_map_txdma_channel_cfg_ring(hxgep, channel, dma_cntl_p, *tx_desc_p,
17113dec9fcdSqs 	    tx_mbox_p);
17123dec9fcdSqs 
17133dec9fcdSqs 	goto hxge_map_txdma_channel_exit;
17143dec9fcdSqs 
17153dec9fcdSqs hxge_map_txdma_channel_fail1:
17163dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
17173dec9fcdSqs 	    "==> hxge_map_txdma_channel: unmap buf"
17183dec9fcdSqs 	    "(status 0x%x channel %d)", status, channel));
17193dec9fcdSqs 	hxge_unmap_txdma_channel_buf_ring(hxgep, *tx_desc_p);
17203dec9fcdSqs 
17213dec9fcdSqs hxge_map_txdma_channel_exit:
17223dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
17233dec9fcdSqs 	    "<== hxge_map_txdma_channel: (status 0x%x channel %d)",
17243dec9fcdSqs 	    status, channel));
17253dec9fcdSqs 
17263dec9fcdSqs 	return (status);
17273dec9fcdSqs }
17283dec9fcdSqs 
/*
 * hxge_unmap_txdma_channel
 *
 *	Undo hxge_map_txdma_channel() for one channel, in reverse order of
 *	setup: first release the descriptor-ring/mailbox state, then the
 *	buffer ring itself (which frees the tx_msg array, DMA handles,
 *	taskq and the ring structure -- see the buf_ring teardown below).
 *	The 'channel' argument is used for debug output only.
 */
/*ARGSUSED*/
static void
hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_unmap_txdma_channel (channel %d)", channel));

	/* unmap tx block ring, and mailbox.  */
	(void) hxge_unmap_txdma_channel_cfg_ring(hxgep, tx_ring_p, tx_mbox_p);

	/* unmap buffer blocks */
	(void) hxge_unmap_txdma_channel_buf_ring(hxgep, tx_ring_p);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma_channel"));
}
17453dec9fcdSqs 
/*
 * hxge_map_txdma_channel_cfg_ring
 *
 *	Prepare the shadow copies of the TDC descriptor-ring and mailbox
 *	registers for one channel: carve the descriptor ring and the
 *	mailbox out of the channel's control DMA area, zero the ring
 *	memory, and compute the ring-configuration and mailbox-address
 *	register values.  Nothing is written to hardware here; only the
 *	soft-state register images in tx_ring_p are populated.  The
 *	allocated mailbox structure is returned through *tx_mbox_p.
 */
/*ARGSUSED*/
static void
hxge_map_txdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
    p_hxge_dma_common_t *dma_cntl_p, p_tx_ring_t tx_ring_p,
    p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t		mboxp;
	p_hxge_dma_common_t	cntl_dmap;
	p_hxge_dma_common_t	dmap;
	tdc_tdr_cfg_t		*tx_ring_cfig_p;
	tdc_tdr_kick_t		*tx_ring_kick_p;
	tdc_tdr_cfg_t		*tx_cs_p;
	tdc_int_mask_t		*tx_evmask_p;
	tdc_mbh_t		*mboxh_p;
	tdc_mbl_t		*mboxl_p;
	uint64_t		tx_desc_len;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Carve the descriptor ring out of the control DMA area. */
	dmap = (p_hxge_dma_common_t)&tx_ring_p->tdc_desc;
	hxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
	    sizeof (tx_desc_t));

	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	/* Clear all shadow register images before composing new values. */
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p",
	    dma_channel, dmap->dma_cookie.dmac_laddress));

	/* NOTE(review): redundant -- already cleared a few lines above. */
	tx_ring_cfig_p->value = 0;

	/* Hydra len is 11 bits and the lower 5 bits are 0s */
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 5);
	tx_ring_cfig_p->value =
	    (dmap->dma_cookie.dmac_laddress & TDC_TDR_CFG_ADDR_MASK) |
	    (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
	    dma_channel, tx_ring_cfig_p->value));

	/* Request a channel reset when this image is written to hardware. */
	tx_cs_p->bits.reset = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_hxge_dma_common_t)&mboxp->tx_mbox;
	hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (tdc_mbh_t *)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (tdc_mbl_t *)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));

	/* Split the mailbox DMA address into the high/low register halves. */
	mboxh_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
	    TDC_MBH_ADDR_SHIFT) & TDC_MBH_MASK);
	mboxl_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress &
	    TDC_MBL_MASK) >> TDC_MBL_SHIFT);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p",
	    mboxh_p->bits.mbaddr, mboxl_p->bits.mbaddr));

	/*
	 * Set page valid and no mask
	 */
	tx_ring_p->page_hdl.value = 0;

	*tx_mbox_p = mboxp;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_map_txdma_channel_cfg_ring"));
}
18373dec9fcdSqs 
/*
 * hxge_unmap_txdma_channel_cfg_ring
 *
 *	Release the per-channel mailbox structure allocated by
 *	hxge_map_txdma_channel_cfg_ring().  The descriptor ring itself
 *	lives in the control DMA pool and is not freed here; the ring
 *	soft state is torn down by the buf_ring unmap path.
 */
/*ARGSUSED*/
static void
hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_unmap_txdma_channel_cfg_ring: channel %d",
	    tx_ring_p->tdc));

	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_unmap_txdma_channel_cfg_ring"));
}
18523dec9fcdSqs 
18533dec9fcdSqs static hxge_status_t
hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep,uint16_t channel,p_hxge_dma_common_t * dma_buf_p,p_tx_ring_t * tx_desc_p,uint32_t num_chunks)18543dec9fcdSqs hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
18553dec9fcdSqs     p_hxge_dma_common_t *dma_buf_p,
18563dec9fcdSqs     p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
18573dec9fcdSqs {
18583dec9fcdSqs 	p_hxge_dma_common_t	dma_bufp, tmp_bufp;
18593dec9fcdSqs 	p_hxge_dma_common_t	dmap;
18603dec9fcdSqs 	hxge_os_dma_handle_t	tx_buf_dma_handle;
18613dec9fcdSqs 	p_tx_ring_t		tx_ring_p;
18623dec9fcdSqs 	p_tx_msg_t		tx_msg_ring;
18633dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
18643dec9fcdSqs 	int			ddi_status = DDI_SUCCESS;
18653dec9fcdSqs 	int			i, j, index;
18663dec9fcdSqs 	uint32_t		size, bsize;
18673dec9fcdSqs 	uint32_t		nblocks, nmsgs;
18681ed83081SMichael Speer 	char			qname[TASKQ_NAMELEN];
18693dec9fcdSqs 
18703dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
18713dec9fcdSqs 	    "==> hxge_map_txdma_channel_buf_ring"));
18723dec9fcdSqs 
18733dec9fcdSqs 	dma_bufp = tmp_bufp = *dma_buf_p;
18743dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
18753dec9fcdSqs 	    " hxge_map_txdma_channel_buf_ring: channel %d to map %d "
18763dec9fcdSqs 	    "chunks bufp $%p", channel, num_chunks, dma_bufp));
18773dec9fcdSqs 
18783dec9fcdSqs 	nmsgs = 0;
18793dec9fcdSqs 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
18803dec9fcdSqs 		nmsgs += tmp_bufp->nblocks;
18813dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
18823dec9fcdSqs 		    "==> hxge_map_txdma_channel_buf_ring: channel %d "
18833dec9fcdSqs 		    "bufp $%p nblocks %d nmsgs %d",
18843dec9fcdSqs 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
18853dec9fcdSqs 	}
18863dec9fcdSqs 	if (!nmsgs) {
18873dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
18883dec9fcdSqs 		    "<== hxge_map_txdma_channel_buf_ring: channel %d "
18893dec9fcdSqs 		    "no msg blocks", channel));
18903dec9fcdSqs 		status = HXGE_ERROR;
18913dec9fcdSqs 
18923dec9fcdSqs 		goto hxge_map_txdma_channel_buf_ring_exit;
18933dec9fcdSqs 	}
18941ed83081SMichael Speer 
18953dec9fcdSqs 	tx_ring_p = (p_tx_ring_t)KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
18961ed83081SMichael Speer 	tx_ring_p->hxgep = hxgep;
18971ed83081SMichael Speer 	(void) snprintf(qname, TASKQ_NAMELEN, "hxge_%d_%d",
18981ed83081SMichael Speer 	    hxgep->instance, channel);
18991ed83081SMichael Speer 	tx_ring_p->taskq = ddi_taskq_create(hxgep->dip, qname, 1,
19001ed83081SMichael Speer 	    TASKQ_DEFAULTPRI, 0);
19011ed83081SMichael Speer 	if (tx_ring_p->taskq == NULL) {
19021ed83081SMichael Speer 		goto hxge_map_txdma_channel_buf_ring_fail1;
19031ed83081SMichael Speer 	}
19041ed83081SMichael Speer 
19053dec9fcdSqs 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
19063dec9fcdSqs 	    (void *) hxgep->interrupt_cookie);
19073dec9fcdSqs 	/*
19083dec9fcdSqs 	 * Allocate transmit message rings and handles for packets not to be
19093dec9fcdSqs 	 * copied to premapped buffers.
19103dec9fcdSqs 	 */
19113dec9fcdSqs 	size = nmsgs * sizeof (tx_msg_t);
19123dec9fcdSqs 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
19133dec9fcdSqs 	for (i = 0; i < nmsgs; i++) {
19143dec9fcdSqs 		ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
19153dec9fcdSqs 		    DDI_DMA_DONTWAIT, 0, &tx_msg_ring[i].dma_handle);
19163dec9fcdSqs 		if (ddi_status != DDI_SUCCESS) {
19173dec9fcdSqs 			status |= HXGE_DDI_FAILED;
19183dec9fcdSqs 			break;
19193dec9fcdSqs 		}
19203dec9fcdSqs 	}
19213dec9fcdSqs 
19223dec9fcdSqs 	if (i < nmsgs) {
19233dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL,
19243dec9fcdSqs 		    "Allocate handles failed."));
19253dec9fcdSqs 
19263dec9fcdSqs 		goto hxge_map_txdma_channel_buf_ring_fail1;
19273dec9fcdSqs 	}
19283dec9fcdSqs 	tx_ring_p->tdc = channel;
19293dec9fcdSqs 	tx_ring_p->tx_msg_ring = tx_msg_ring;
19303dec9fcdSqs 	tx_ring_p->tx_ring_size = nmsgs;
19313dec9fcdSqs 	tx_ring_p->num_chunks = num_chunks;
19323dec9fcdSqs 	if (!hxge_tx_intr_thres) {
19333dec9fcdSqs 		hxge_tx_intr_thres = tx_ring_p->tx_ring_size / 4;
19343dec9fcdSqs 	}
19353dec9fcdSqs 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
19363dec9fcdSqs 	tx_ring_p->rd_index = 0;
19373dec9fcdSqs 	tx_ring_p->wr_index = 0;
19383dec9fcdSqs 	tx_ring_p->ring_head.value = 0;
19393dec9fcdSqs 	tx_ring_p->ring_kick_tail.value = 0;
19403dec9fcdSqs 	tx_ring_p->descs_pending = 0;
19413dec9fcdSqs 
19423dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
19433dec9fcdSqs 	    "==> hxge_map_txdma_channel_buf_ring: channel %d "
19443dec9fcdSqs 	    "actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)",
19453dec9fcdSqs 	    channel, tx_ring_p->tx_ring_size, nmsgs, hxge_tx_ring_size));
19463dec9fcdSqs 
19473dec9fcdSqs 	/*
19483dec9fcdSqs 	 * Map in buffers from the buffer pool.
19493dec9fcdSqs 	 */
19503dec9fcdSqs 	index = 0;
19513dec9fcdSqs 	bsize = dma_bufp->block_size;
19523dec9fcdSqs 
19533dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma_channel_buf_ring: "
19543dec9fcdSqs 	    "dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d",
19553dec9fcdSqs 	    dma_bufp, tx_ring_p, tx_msg_ring, bsize));
19563dec9fcdSqs 
19573dec9fcdSqs 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
19583dec9fcdSqs 		bsize = dma_bufp->block_size;
19593dec9fcdSqs 		nblocks = dma_bufp->nblocks;
1960cf6ef894SMichael Speer 		tx_buf_dma_handle = dma_bufp->dma_handle;
19613dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
19623dec9fcdSqs 		    "==> hxge_map_txdma_channel_buf_ring: dma chunk %d "
19633dec9fcdSqs 		    "size %d dma_bufp $%p",
19643dec9fcdSqs 		    i, sizeof (hxge_dma_common_t), dma_bufp));
19653dec9fcdSqs 
19663dec9fcdSqs 		for (j = 0; j < nblocks; j++) {
19673dec9fcdSqs 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
1968cf6ef894SMichael Speer 			tx_msg_ring[index].offset_index = j;
19693dec9fcdSqs 			dmap = &tx_msg_ring[index++].buf_dma;
19703dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
19713dec9fcdSqs 			    "==> hxge_map_txdma_channel_buf_ring: j %d"
19723dec9fcdSqs 			    "dmap $%p", i, dmap));
19733dec9fcdSqs 			hxge_setup_dma_common(dmap, dma_bufp, 1, bsize);
19743dec9fcdSqs 		}
19753dec9fcdSqs 	}
19763dec9fcdSqs 
19773dec9fcdSqs 	if (i < num_chunks) {
19783dec9fcdSqs 		status = HXGE_ERROR;
19793dec9fcdSqs 
19803dec9fcdSqs 		goto hxge_map_txdma_channel_buf_ring_fail1;
19813dec9fcdSqs 	}
19823dec9fcdSqs 
19833dec9fcdSqs 	*tx_desc_p = tx_ring_p;
19843dec9fcdSqs 
19853dec9fcdSqs 	goto hxge_map_txdma_channel_buf_ring_exit;
19863dec9fcdSqs 
19873dec9fcdSqs hxge_map_txdma_channel_buf_ring_fail1:
19881ed83081SMichael Speer 	if (tx_ring_p->taskq) {
19891ed83081SMichael Speer 		ddi_taskq_destroy(tx_ring_p->taskq);
19901ed83081SMichael Speer 		tx_ring_p->taskq = NULL;
19911ed83081SMichael Speer 	}
19921ed83081SMichael Speer 
19933dec9fcdSqs 	index--;
19943dec9fcdSqs 	for (; index >= 0; index--) {
19953dec9fcdSqs 		if (tx_msg_ring[index].dma_handle != NULL) {
19963dec9fcdSqs 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
19973dec9fcdSqs 		}
19983dec9fcdSqs 	}
19993dec9fcdSqs 	MUTEX_DESTROY(&tx_ring_p->lock);
20003dec9fcdSqs 	KMEM_FREE(tx_msg_ring, size);
20013dec9fcdSqs 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
20023dec9fcdSqs 
20033dec9fcdSqs 	status = HXGE_ERROR;
20043dec9fcdSqs 
20053dec9fcdSqs hxge_map_txdma_channel_buf_ring_exit:
20063dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
20073dec9fcdSqs 	    "<== hxge_map_txdma_channel_buf_ring status 0x%x", status));
20083dec9fcdSqs 
20093dec9fcdSqs 	return (status);
20103dec9fcdSqs }
20113dec9fcdSqs 
/*
 * hxge_unmap_txdma_channel_buf_ring
 *
 *	Tear down the software transmit ring built by
 *	hxge_map_txdma_channel_buf_ring(): under the ring lock, unload any
 *	outstanding DVMA/DMA mappings and free any attached messages, then
 *	free every per-message DMA handle; finally destroy the taskq, the
 *	mutex, the message ring array and the ring structure itself.
 */
/*ARGSUSED*/
static void
hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, p_tx_ring_t tx_ring_p)
{
	p_tx_msg_t	tx_msg_ring;
	p_tx_msg_t	tx_msg_p;
	int		i;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_unmap_txdma_channel_buf_ring"));
	if (tx_ring_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_unmap_txdma_channel_buf_ring: NULL ringp"));
		return;
	}
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_unmap_txdma_channel_buf_ring: channel %d",
	    tx_ring_p->tdc));

	MUTEX_ENTER(&tx_ring_p->lock);
	tx_msg_ring = tx_ring_p->tx_msg_ring;
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		tx_msg_p = &tx_msg_ring[i];
		if (tx_msg_p->flags.dma_type == USE_DVMA) {
			/* Unload the fast-DVMA mapping and retire its slot. */
			HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "entry = %d", i));
			(void) dvma_unload(tx_msg_p->dvma_handle, 0, -1);
			tx_msg_p->dvma_handle = NULL;
			if (tx_ring_p->dvma_wr_index ==
			    tx_ring_p->dvma_wrap_mask) {
				tx_ring_p->dvma_wr_index = 0;
			} else {
				tx_ring_p->dvma_wr_index++;
			}
			tx_ring_p->dvma_pending--;
		} else if (tx_msg_p->flags.dma_type == USE_DMA) {
			if (ddi_dma_unbind_handle(tx_msg_p->dma_handle)) {
				cmn_err(CE_WARN, "hxge_unmap_tx_bug_ring: "
				    "ddi_dma_unbind_handle failed.");
			}
		}
		/* Free any mblk still attached to this slot. */
		if (tx_msg_p->tx_message != NULL) {
			freemsg(tx_msg_p->tx_message);
			tx_msg_p->tx_message = NULL;
		}
	}

	/* Free the per-message DMA handles allocated at map time. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		if (tx_msg_ring[i].dma_handle != NULL) {
			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
		}
	}
	MUTEX_EXIT(&tx_ring_p->lock);

	if (tx_ring_p->taskq) {
		ddi_taskq_destroy(tx_ring_p->taskq);
		tx_ring_p->taskq = NULL;
	}

	MUTEX_DESTROY(&tx_ring_p->lock);
	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_unmap_txdma_channel_buf_ring"));
}
20773dec9fcdSqs 
20783dec9fcdSqs static hxge_status_t
hxge_txdma_hw_start(p_hxge_t hxgep)20793dec9fcdSqs hxge_txdma_hw_start(p_hxge_t hxgep)
20803dec9fcdSqs {
20813dec9fcdSqs 	int			i, ndmas;
20823dec9fcdSqs 	uint16_t		channel;
20833dec9fcdSqs 	p_tx_rings_t		tx_rings;
20843dec9fcdSqs 	p_tx_ring_t		*tx_desc_rings;
20853dec9fcdSqs 	p_tx_mbox_areas_t	tx_mbox_areas_p;
20863dec9fcdSqs 	p_tx_mbox_t		*tx_mbox_p;
20873dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
20883dec9fcdSqs 	uint64_t		tmp;
20893dec9fcdSqs 
20903dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start"));
20913dec9fcdSqs 
20923dec9fcdSqs 	/*
20933dec9fcdSqs 	 * Initialize REORD Table 1. Disable VMAC 2. Reset the FIFO Err Stat.
20943dec9fcdSqs 	 * 3. Scrub memory and check for errors.
20953dec9fcdSqs 	 */
20963dec9fcdSqs 	(void) hxge_tx_vmac_disable(hxgep);
20973dec9fcdSqs 
20983dec9fcdSqs 	/*
20993dec9fcdSqs 	 * Clear the error status
21003dec9fcdSqs 	 */
21013dec9fcdSqs 	HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
21023dec9fcdSqs 
21033dec9fcdSqs 	/*
21043dec9fcdSqs 	 * Scrub the rtab memory for the TDC and reset the TDC.
21053dec9fcdSqs 	 */
21063dec9fcdSqs 	HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, 0x0ULL);
21073dec9fcdSqs 	HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, 0x0ULL);
21083dec9fcdSqs 
21093dec9fcdSqs 	for (i = 0; i < 256; i++) {
21103dec9fcdSqs 		HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
21113dec9fcdSqs 		    (uint64_t)i);
21123dec9fcdSqs 
21133dec9fcdSqs 		/*
21143dec9fcdSqs 		 * Write the command register with an indirect read instruction
21153dec9fcdSqs 		 */
21163dec9fcdSqs 		tmp = (0x1ULL << 30) | i;
21173dec9fcdSqs 		HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
21183dec9fcdSqs 
21193dec9fcdSqs 		/*
21203dec9fcdSqs 		 * Wait for status done
21213dec9fcdSqs 		 */
21223dec9fcdSqs 		tmp = 0;
21233dec9fcdSqs 		do {
21243dec9fcdSqs 			HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
21253dec9fcdSqs 			    &tmp);
21263dec9fcdSqs 		} while (((tmp >> 31) & 0x1ULL) == 0x0);
21273dec9fcdSqs 	}
21283dec9fcdSqs 
21293dec9fcdSqs 	for (i = 0; i < 256; i++) {
21303dec9fcdSqs 		/*
21313dec9fcdSqs 		 * Write the command register with an indirect read instruction
21323dec9fcdSqs 		 */
21333dec9fcdSqs 		tmp = (0x1ULL << 30) | i;
21343dec9fcdSqs 		HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
21353dec9fcdSqs 
21363dec9fcdSqs 		/*
21373dec9fcdSqs 		 * Wait for status done
21383dec9fcdSqs 		 */
21393dec9fcdSqs 		tmp = 0;
21403dec9fcdSqs 		do {
21413dec9fcdSqs 			HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
21423dec9fcdSqs 			    &tmp);
21433dec9fcdSqs 		} while (((tmp >> 31) & 0x1ULL) == 0x0);
21443dec9fcdSqs 
21453dec9fcdSqs 		HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, &tmp);
21463dec9fcdSqs 		if (0x1ff00ULL != (0x1ffffULL & tmp)) {
21473dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
21483dec9fcdSqs 			    "unexpected data (hi), entry: %x, value: 0x%0llx\n",
21493dec9fcdSqs 			    i, (unsigned long long)tmp));
2150fe930412Sqs 			status = HXGE_ERROR;
21513dec9fcdSqs 		}
21523dec9fcdSqs 
21533dec9fcdSqs 		HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, &tmp);
21543dec9fcdSqs 		if (tmp != 0) {
21553dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
21563dec9fcdSqs 			    "unexpected data (lo), entry: %x\n", i));
2157fe930412Sqs 			status = HXGE_ERROR;
21583dec9fcdSqs 		}
21593dec9fcdSqs 
21603dec9fcdSqs 		HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp);
21613dec9fcdSqs 		if (tmp != 0) {
21623dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
21633dec9fcdSqs 			    "parity error, entry: %x, val 0x%llx\n",
21643dec9fcdSqs 			    i, (unsigned long long)tmp));
2165fe930412Sqs 			status = HXGE_ERROR;
21663dec9fcdSqs 		}
21673dec9fcdSqs 
21683dec9fcdSqs 		HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp);
21693dec9fcdSqs 		if (tmp != 0) {
21703dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
21713dec9fcdSqs 			    "parity error, entry: %x\n", i));
2172fe930412Sqs 			status = HXGE_ERROR;
21733dec9fcdSqs 		}
21743dec9fcdSqs 	}
21753dec9fcdSqs 
2176fe930412Sqs 	if (status != HXGE_OK)
2177fe930412Sqs 		goto hxge_txdma_hw_start_exit;
2178fe930412Sqs 
21793dec9fcdSqs 	/*
21803dec9fcdSqs 	 * Reset FIFO Error Status for the TDC and enable FIFO error events.
21813dec9fcdSqs 	 */
21823dec9fcdSqs 	HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
21833dec9fcdSqs 	HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_MASK, 0x0);
21843dec9fcdSqs 
21853dec9fcdSqs 	/*
21863dec9fcdSqs 	 * Initialize the Transmit DMAs.
21873dec9fcdSqs 	 */
21883dec9fcdSqs 	tx_rings = hxgep->tx_rings;
21893dec9fcdSqs 	if (tx_rings == NULL) {
21903dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
21913dec9fcdSqs 		    "<== hxge_txdma_hw_start: NULL ring pointer"));
21923dec9fcdSqs 		return (HXGE_ERROR);
21933dec9fcdSqs 	}
2194fe930412Sqs 
21953dec9fcdSqs 	tx_desc_rings = tx_rings->rings;
21963dec9fcdSqs 	if (tx_desc_rings == NULL) {
21973dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
21983dec9fcdSqs 		    "<== hxge_txdma_hw_start: NULL ring pointers"));
21993dec9fcdSqs 		return (HXGE_ERROR);
22003dec9fcdSqs 	}
22013dec9fcdSqs 	ndmas = tx_rings->ndmas;
22023dec9fcdSqs 	if (!ndmas) {
22033dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
22043dec9fcdSqs 		    "<== hxge_txdma_hw_start: no dma channel allocated"));
22053dec9fcdSqs 		return (HXGE_ERROR);
22063dec9fcdSqs 	}
22073dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start: "
22083dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
22093dec9fcdSqs 	    tx_rings, tx_desc_rings, ndmas));
22103dec9fcdSqs 
22113dec9fcdSqs 	tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
22123dec9fcdSqs 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
22133dec9fcdSqs 
22143dec9fcdSqs 	/*
22153dec9fcdSqs 	 * Init the DMAs.
22163dec9fcdSqs 	 */
22173dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
22183dec9fcdSqs 		channel = tx_desc_rings[i]->tdc;
22193dec9fcdSqs 		status = hxge_txdma_start_channel(hxgep, channel,
22203dec9fcdSqs 		    (p_tx_ring_t)tx_desc_rings[i],
22213dec9fcdSqs 		    (p_tx_mbox_t)tx_mbox_p[i]);
22223dec9fcdSqs 		if (status != HXGE_OK) {
22233dec9fcdSqs 			goto hxge_txdma_hw_start_fail1;
22243dec9fcdSqs 		}
22253dec9fcdSqs 	}
22263dec9fcdSqs 
22273dec9fcdSqs 	(void) hxge_tx_vmac_enable(hxgep);
22283dec9fcdSqs 
22293dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
22303dec9fcdSqs 	    "==> hxge_txdma_hw_start: tx_rings $%p rings $%p",
22313dec9fcdSqs 	    hxgep->tx_rings, hxgep->tx_rings->rings));
22323dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
22333dec9fcdSqs 	    "==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p",
22343dec9fcdSqs 	    hxgep->tx_rings, tx_desc_rings));
22353dec9fcdSqs 
22363dec9fcdSqs 	goto hxge_txdma_hw_start_exit;
22373dec9fcdSqs 
22383dec9fcdSqs hxge_txdma_hw_start_fail1:
22393dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
22403dec9fcdSqs 	    "==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)",
22413dec9fcdSqs 	    status, channel, i));
22423dec9fcdSqs 
22433dec9fcdSqs 	for (; i >= 0; i--) {
22443dec9fcdSqs 		channel = tx_desc_rings[i]->tdc,
22453dec9fcdSqs 		    (void) hxge_txdma_stop_channel(hxgep, channel,
22463dec9fcdSqs 		    (p_tx_ring_t)tx_desc_rings[i],
22473dec9fcdSqs 		    (p_tx_mbox_t)tx_mbox_p[i]);
22483dec9fcdSqs 	}
22493dec9fcdSqs 
22503dec9fcdSqs hxge_txdma_hw_start_exit:
22513dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
22523dec9fcdSqs 	    "==> hxge_txdma_hw_start: (status 0x%x)", status));
22533dec9fcdSqs 
22543dec9fcdSqs 	return (status);
22553dec9fcdSqs }
22563dec9fcdSqs 
22573dec9fcdSqs static void
hxge_txdma_hw_stop(p_hxge_t hxgep)22583dec9fcdSqs hxge_txdma_hw_stop(p_hxge_t hxgep)
22593dec9fcdSqs {
22603dec9fcdSqs 	int			i, ndmas;
22613dec9fcdSqs 	uint16_t		channel;
22623dec9fcdSqs 	p_tx_rings_t		tx_rings;
22633dec9fcdSqs 	p_tx_ring_t		*tx_desc_rings;
22643dec9fcdSqs 	p_tx_mbox_areas_t	tx_mbox_areas_p;
22653dec9fcdSqs 	p_tx_mbox_t		*tx_mbox_p;
22663dec9fcdSqs 
22673dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop"));
22683dec9fcdSqs 
22693dec9fcdSqs 	tx_rings = hxgep->tx_rings;
22703dec9fcdSqs 	if (tx_rings == NULL) {
22713dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
22723dec9fcdSqs 		    "<== hxge_txdma_hw_stop: NULL ring pointer"));
22733dec9fcdSqs 		return;
22743dec9fcdSqs 	}
22753dec9fcdSqs 
22763dec9fcdSqs 	tx_desc_rings = tx_rings->rings;
22773dec9fcdSqs 	if (tx_desc_rings == NULL) {
22783dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
22793dec9fcdSqs 		    "<== hxge_txdma_hw_stop: NULL ring pointers"));
22803dec9fcdSqs 		return;
22813dec9fcdSqs 	}
22823dec9fcdSqs 
22833dec9fcdSqs 	ndmas = tx_rings->ndmas;
22843dec9fcdSqs 	if (!ndmas) {
22853dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
22863dec9fcdSqs 		    "<== hxge_txdma_hw_stop: no dma channel allocated"));
22873dec9fcdSqs 		return;
22883dec9fcdSqs 	}
22893dec9fcdSqs 
22903dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
22913dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
22923dec9fcdSqs 
22933dec9fcdSqs 	tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
22943dec9fcdSqs 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
22953dec9fcdSqs 
22963dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
22973dec9fcdSqs 		channel = tx_desc_rings[i]->tdc;
22983dec9fcdSqs 		(void) hxge_txdma_stop_channel(hxgep, channel,
22993dec9fcdSqs 		    (p_tx_ring_t)tx_desc_rings[i],
23003dec9fcdSqs 		    (p_tx_mbox_t)tx_mbox_p[i]);
23013dec9fcdSqs 	}
23023dec9fcdSqs 
23033dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
23043dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
23053dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_hw_stop"));
23063dec9fcdSqs }
23073dec9fcdSqs 
23083dec9fcdSqs static hxge_status_t
hxge_txdma_start_channel(p_hxge_t hxgep,uint16_t channel,p_tx_ring_t tx_ring_p,p_tx_mbox_t tx_mbox_p)23093dec9fcdSqs hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
23103dec9fcdSqs     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
23113dec9fcdSqs {
23123dec9fcdSqs 	hxge_status_t status = HXGE_OK;
23133dec9fcdSqs 
23143dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
23153dec9fcdSqs 	    "==> hxge_txdma_start_channel (channel %d)", channel));
23163dec9fcdSqs 	/*
23173dec9fcdSqs 	 * TXDMA/TXC must be in stopped state.
23183dec9fcdSqs 	 */
23193dec9fcdSqs 	(void) hxge_txdma_stop_inj_err(hxgep, channel);
23203dec9fcdSqs 
23213dec9fcdSqs 	/*
23223dec9fcdSqs 	 * Reset TXDMA channel
23233dec9fcdSqs 	 */
23243dec9fcdSqs 	tx_ring_p->tx_cs.value = 0;
23253dec9fcdSqs 	tx_ring_p->tx_cs.bits.reset = 1;
23263dec9fcdSqs 	status = hxge_reset_txdma_channel(hxgep, channel,
23273dec9fcdSqs 	    tx_ring_p->tx_cs.value);
23283dec9fcdSqs 	if (status != HXGE_OK) {
23293dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
23303dec9fcdSqs 		    "==> hxge_txdma_start_channel (channel %d)"
23313dec9fcdSqs 		    " reset channel failed 0x%x", channel, status));
23323dec9fcdSqs 
23333dec9fcdSqs 		goto hxge_txdma_start_channel_exit;
23343dec9fcdSqs 	}
23353dec9fcdSqs 
23363dec9fcdSqs 	/*
23373dec9fcdSqs 	 * Initialize the TXDMA channel specific FZC control configurations.
23383dec9fcdSqs 	 * These FZC registers are pertaining to each TX channel (i.e. logical
23393dec9fcdSqs 	 * pages).
23403dec9fcdSqs 	 */
23413dec9fcdSqs 	status = hxge_init_fzc_txdma_channel(hxgep, channel,
23423dec9fcdSqs 	    tx_ring_p, tx_mbox_p);
23433dec9fcdSqs 	if (status != HXGE_OK) {
23443dec9fcdSqs 		goto hxge_txdma_start_channel_exit;
23453dec9fcdSqs 	}
23463dec9fcdSqs 
23473dec9fcdSqs 	/*
23483dec9fcdSqs 	 * Initialize the event masks.
23493dec9fcdSqs 	 */
23503dec9fcdSqs 	tx_ring_p->tx_evmask.value = 0;
23513dec9fcdSqs 	status = hxge_init_txdma_channel_event_mask(hxgep,
23523dec9fcdSqs 	    channel, &tx_ring_p->tx_evmask);
23533dec9fcdSqs 	if (status != HXGE_OK) {
23543dec9fcdSqs 		goto hxge_txdma_start_channel_exit;
23553dec9fcdSqs 	}
23563dec9fcdSqs 
23573dec9fcdSqs 	/*
23583dec9fcdSqs 	 * Load TXDMA descriptors, buffers, mailbox, initialise the DMA
23593dec9fcdSqs 	 * channels and enable each DMA channel.
23603dec9fcdSqs 	 */
23613dec9fcdSqs 	status = hxge_enable_txdma_channel(hxgep, channel,
23623dec9fcdSqs 	    tx_ring_p, tx_mbox_p);
23633dec9fcdSqs 	if (status != HXGE_OK) {
23643dec9fcdSqs 		goto hxge_txdma_start_channel_exit;
23653dec9fcdSqs 	}
23663dec9fcdSqs 
23673dec9fcdSqs hxge_txdma_start_channel_exit:
23683dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_start_channel"));
23693dec9fcdSqs 
23703dec9fcdSqs 	return (status);
23713dec9fcdSqs }
23723dec9fcdSqs 
23733dec9fcdSqs /*ARGSUSED*/
23743dec9fcdSqs static hxge_status_t
hxge_txdma_stop_channel(p_hxge_t hxgep,uint16_t channel,p_tx_ring_t tx_ring_p,p_tx_mbox_t tx_mbox_p)23753dec9fcdSqs hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
23763dec9fcdSqs     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
23773dec9fcdSqs {
23783dec9fcdSqs 	int status = HXGE_OK;
23793dec9fcdSqs 
23803dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
23813dec9fcdSqs 	    "==> hxge_txdma_stop_channel: channel %d", channel));
23823dec9fcdSqs 
23833dec9fcdSqs 	/*
23843dec9fcdSqs 	 * Stop (disable) TXDMA and TXC (if stop bit is set and STOP_N_GO bit
23853dec9fcdSqs 	 * not set, the TXDMA reset state will not be set if reset TXDMA.
23863dec9fcdSqs 	 */
23873dec9fcdSqs 	(void) hxge_txdma_stop_inj_err(hxgep, channel);
23883dec9fcdSqs 
23893dec9fcdSqs 	/*
23903dec9fcdSqs 	 * Reset TXDMA channel
23913dec9fcdSqs 	 */
23923dec9fcdSqs 	tx_ring_p->tx_cs.value = 0;
23933dec9fcdSqs 	tx_ring_p->tx_cs.bits.reset = 1;
23943dec9fcdSqs 	status = hxge_reset_txdma_channel(hxgep, channel,
23953dec9fcdSqs 	    tx_ring_p->tx_cs.value);
23963dec9fcdSqs 	if (status != HXGE_OK) {
23973dec9fcdSqs 		goto hxge_txdma_stop_channel_exit;
23983dec9fcdSqs 	}
23993dec9fcdSqs 
24003dec9fcdSqs hxge_txdma_stop_channel_exit:
24013dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_stop_channel"));
24023dec9fcdSqs 
24033dec9fcdSqs 	return (status);
24043dec9fcdSqs }
24053dec9fcdSqs 
24063dec9fcdSqs static p_tx_ring_t
hxge_txdma_get_ring(p_hxge_t hxgep,uint16_t channel)24073dec9fcdSqs hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel)
24083dec9fcdSqs {
24093dec9fcdSqs 	int		index, ndmas;
24103dec9fcdSqs 	uint16_t	tdc;
24113dec9fcdSqs 	p_tx_rings_t	tx_rings;
24123dec9fcdSqs 
24133dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_ring"));
24143dec9fcdSqs 
24153dec9fcdSqs 	tx_rings = hxgep->tx_rings;
24163dec9fcdSqs 	if (tx_rings == NULL) {
24173dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
24183dec9fcdSqs 		    "<== hxge_txdma_get_ring: NULL ring pointer"));
24193dec9fcdSqs 		return (NULL);
24203dec9fcdSqs 	}
24213dec9fcdSqs 	ndmas = tx_rings->ndmas;
24223dec9fcdSqs 	if (!ndmas) {
24233dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
24243dec9fcdSqs 		    "<== hxge_txdma_get_ring: no channel allocated"));
24253dec9fcdSqs 		return (NULL);
24263dec9fcdSqs 	}
24273dec9fcdSqs 	if (tx_rings->rings == NULL) {
24283dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
24293dec9fcdSqs 		    "<== hxge_txdma_get_ring: NULL rings pointer"));
24303dec9fcdSqs 		return (NULL);
24313dec9fcdSqs 	}
24323dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_ring: "
24333dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
24343dec9fcdSqs 	    tx_rings, tx_rings, ndmas));
24353dec9fcdSqs 
24363dec9fcdSqs 	for (index = 0; index < ndmas; index++) {
24373dec9fcdSqs 		tdc = tx_rings->rings[index]->tdc;
24383dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
24393dec9fcdSqs 		    "==> hxge_fixup_txdma_rings: channel %d", tdc));
24403dec9fcdSqs 		if (channel == tdc) {
24413dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
24423dec9fcdSqs 			    "<== hxge_txdma_get_ring: tdc %d ring $%p",
24433dec9fcdSqs 			    tdc, tx_rings->rings[index]));
24443dec9fcdSqs 			return (p_tx_ring_t)(tx_rings->rings[index]);
24453dec9fcdSqs 		}
24463dec9fcdSqs 	}
24473dec9fcdSqs 
24483dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_ring"));
24493dec9fcdSqs 
24503dec9fcdSqs 	return (NULL);
24513dec9fcdSqs }
24523dec9fcdSqs 
24533dec9fcdSqs static p_tx_mbox_t
hxge_txdma_get_mbox(p_hxge_t hxgep,uint16_t channel)24543dec9fcdSqs hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel)
24553dec9fcdSqs {
24563dec9fcdSqs 	int			index, tdc, ndmas;
24573dec9fcdSqs 	p_tx_rings_t		tx_rings;
24583dec9fcdSqs 	p_tx_mbox_areas_t	tx_mbox_areas_p;
24593dec9fcdSqs 	p_tx_mbox_t		*tx_mbox_p;
24603dec9fcdSqs 
24613dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_mbox"));
24623dec9fcdSqs 
24633dec9fcdSqs 	tx_rings = hxgep->tx_rings;
24643dec9fcdSqs 	if (tx_rings == NULL) {
24653dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
24663dec9fcdSqs 		    "<== hxge_txdma_get_mbox: NULL ring pointer"));
24673dec9fcdSqs 		return (NULL);
24683dec9fcdSqs 	}
24693dec9fcdSqs 	tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
24703dec9fcdSqs 	if (tx_mbox_areas_p == NULL) {
24713dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
24723dec9fcdSqs 		    "<== hxge_txdma_get_mbox: NULL mbox pointer"));
24733dec9fcdSqs 		return (NULL);
24743dec9fcdSqs 	}
24753dec9fcdSqs 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
24763dec9fcdSqs 
24773dec9fcdSqs 	ndmas = tx_rings->ndmas;
24783dec9fcdSqs 	if (!ndmas) {
24793dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
24803dec9fcdSqs 		    "<== hxge_txdma_get_mbox: no channel allocated"));
24813dec9fcdSqs 		return (NULL);
24823dec9fcdSqs 	}
24833dec9fcdSqs 	if (tx_rings->rings == NULL) {
24843dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
24853dec9fcdSqs 		    "<== hxge_txdma_get_mbox: NULL rings pointer"));
24863dec9fcdSqs 		return (NULL);
24873dec9fcdSqs 	}
24883dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_mbox: "
24893dec9fcdSqs 	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
24903dec9fcdSqs 	    tx_rings, tx_rings, ndmas));
24913dec9fcdSqs 
24923dec9fcdSqs 	for (index = 0; index < ndmas; index++) {
24933dec9fcdSqs 		tdc = tx_rings->rings[index]->tdc;
24943dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
24953dec9fcdSqs 		    "==> hxge_txdma_get_mbox: channel %d", tdc));
24963dec9fcdSqs 		if (channel == tdc) {
24973dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, TX_CTL,
24983dec9fcdSqs 			    "<== hxge_txdma_get_mbox: tdc %d ring $%p",
24993dec9fcdSqs 			    tdc, tx_rings->rings[index]));
25003dec9fcdSqs 			return (p_tx_mbox_t)(tx_mbox_p[index]);
25013dec9fcdSqs 		}
25023dec9fcdSqs 	}
25033dec9fcdSqs 
25043dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_mbox"));
25053dec9fcdSqs 
25063dec9fcdSqs 	return (NULL);
25073dec9fcdSqs }
25083dec9fcdSqs 
25093dec9fcdSqs /*ARGSUSED*/
25103dec9fcdSqs static hxge_status_t
hxge_tx_err_evnts(p_hxge_t hxgep,uint_t index,p_hxge_ldv_t ldvp,tdc_stat_t cs)25113dec9fcdSqs hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
25123dec9fcdSqs     tdc_stat_t cs)
25133dec9fcdSqs {
25143dec9fcdSqs 	hpi_handle_t		handle;
25153dec9fcdSqs 	uint8_t			channel;
25163dec9fcdSqs 	p_tx_ring_t		*tx_rings;
25173dec9fcdSqs 	p_tx_ring_t		tx_ring_p;
25183dec9fcdSqs 	p_hxge_tx_ring_stats_t	tdc_stats;
25193dec9fcdSqs 	boolean_t		txchan_fatal = B_FALSE;
25203dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
25213dec9fcdSqs 	tdc_drop_cnt_t		drop_cnt;
25223dec9fcdSqs 
25233dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_tx_err_evnts"));
25243dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
25253dec9fcdSqs 	channel = ldvp->channel;
25263dec9fcdSqs 
25273dec9fcdSqs 	tx_rings = hxgep->tx_rings->rings;
25283dec9fcdSqs 	tx_ring_p = tx_rings[index];
25293dec9fcdSqs 	tdc_stats = tx_ring_p->tdc_stats;
25303dec9fcdSqs 
25313dec9fcdSqs 	/* Get the error counts if any */
25323dec9fcdSqs 	TXDMA_REG_READ64(handle, TDC_DROP_CNT, channel, &drop_cnt.value);
25333dec9fcdSqs 	tdc_stats->count_hdr_size_err += drop_cnt.bits.hdr_size_error_count;
25343dec9fcdSqs 	tdc_stats->count_runt += drop_cnt.bits.runt_count;
25353dec9fcdSqs 	tdc_stats->count_abort += drop_cnt.bits.abort_count;
25363dec9fcdSqs 
25373dec9fcdSqs 	if (cs.bits.peu_resp_err) {
25383dec9fcdSqs 		tdc_stats->peu_resp_err++;
25393dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
25403dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR);
25413dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25423dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
25433dec9fcdSqs 		    "fatal error: peu_resp_err", channel));
25443dec9fcdSqs 		txchan_fatal = B_TRUE;
25453dec9fcdSqs 	}
25463dec9fcdSqs 
25473dec9fcdSqs 	if (cs.bits.pkt_size_hdr_err) {
25483dec9fcdSqs 		tdc_stats->pkt_size_hdr_err++;
25493dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
25503dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR);
25513dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25523dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
25533dec9fcdSqs 		    "fatal error: pkt_size_hdr_err", channel));
25543dec9fcdSqs 		txchan_fatal = B_TRUE;
25553dec9fcdSqs 	}
25563dec9fcdSqs 
25573dec9fcdSqs 	if (cs.bits.runt_pkt_drop_err) {
25583dec9fcdSqs 		tdc_stats->runt_pkt_drop_err++;
25593dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
25603dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR);
25613dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25623dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
25633dec9fcdSqs 		    "fatal error: runt_pkt_drop_err", channel));
25643dec9fcdSqs 		txchan_fatal = B_TRUE;
25653dec9fcdSqs 	}
25663dec9fcdSqs 
25673dec9fcdSqs 	if (cs.bits.pkt_size_err) {
25683dec9fcdSqs 		tdc_stats->pkt_size_err++;
25693dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
25703dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
25713dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25723dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
25733dec9fcdSqs 		    "fatal error: pkt_size_err", channel));
25743dec9fcdSqs 		txchan_fatal = B_TRUE;
25753dec9fcdSqs 	}
25763dec9fcdSqs 
25773dec9fcdSqs 	if (cs.bits.tx_rng_oflow) {
25783dec9fcdSqs 		tdc_stats->tx_rng_oflow++;
25793dec9fcdSqs 		if (tdc_stats->tx_rng_oflow)
25803dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25813dec9fcdSqs 			    "==> hxge_tx_err_evnts(channel %d): "
25823dec9fcdSqs 			    "fatal error: tx_rng_oflow", channel));
25833dec9fcdSqs 	}
25843dec9fcdSqs 
25853dec9fcdSqs 	if (cs.bits.pref_par_err) {
25863dec9fcdSqs 		tdc_stats->pref_par_err++;
25873dec9fcdSqs 
25883dec9fcdSqs 		/* Get the address of parity error read data */
25893dec9fcdSqs 		TXDMA_REG_READ64(hxgep->hpi_handle, TDC_PREF_PAR_LOG,
25903dec9fcdSqs 		    channel, &tdc_stats->errlog.value);
25913dec9fcdSqs 
25923dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
25933dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR);
25943dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25953dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
25963dec9fcdSqs 		    "fatal error: pref_par_err", channel));
25973dec9fcdSqs 		txchan_fatal = B_TRUE;
25983dec9fcdSqs 	}
25993dec9fcdSqs 
26003dec9fcdSqs 	if (cs.bits.tdr_pref_cpl_to) {
26013dec9fcdSqs 		tdc_stats->tdr_pref_cpl_to++;
26023dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
26033dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO);
26043dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
26053dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
2606fd9489ceSQiyan Sun - Sun Microsystems - San Diego United States 		    "fatal error: tdr_pref_cpl_to", channel));
26073dec9fcdSqs 		txchan_fatal = B_TRUE;
26083dec9fcdSqs 	}
26093dec9fcdSqs 
26103dec9fcdSqs 	if (cs.bits.pkt_cpl_to) {
26113dec9fcdSqs 		tdc_stats->pkt_cpl_to++;
26123dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
26133dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_PKT_CPL_TO);
26143dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
26153dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
26163dec9fcdSqs 		    "fatal error: pkt_cpl_to", channel));
26173dec9fcdSqs 		txchan_fatal = B_TRUE;
26183dec9fcdSqs 	}
26193dec9fcdSqs 
26203dec9fcdSqs 	if (cs.bits.invalid_sop) {
26213dec9fcdSqs 		tdc_stats->invalid_sop++;
26223dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
26233dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_INVALID_SOP);
26243dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
26253dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
26263dec9fcdSqs 		    "fatal error: invalid_sop", channel));
26273dec9fcdSqs 		txchan_fatal = B_TRUE;
26283dec9fcdSqs 	}
26293dec9fcdSqs 
26303dec9fcdSqs 	if (cs.bits.unexpected_sop) {
26313dec9fcdSqs 		tdc_stats->unexpected_sop++;
26323dec9fcdSqs 		HXGE_FM_REPORT_ERROR(hxgep, channel,
26333dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP);
26343dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
26353dec9fcdSqs 		    "==> hxge_tx_err_evnts(channel %d): "
26363dec9fcdSqs 		    "fatal error: unexpected_sop", channel));
26373dec9fcdSqs 		txchan_fatal = B_TRUE;
26383dec9fcdSqs 	}
26393dec9fcdSqs 
26403dec9fcdSqs 	/* Clear error injection source in case this is an injected error */
26413dec9fcdSqs 	TXDMA_REG_WRITE64(hxgep->hpi_handle, TDC_STAT_INT_DBG, channel, 0);
26423dec9fcdSqs 
26433dec9fcdSqs 	if (txchan_fatal) {
26443dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
26453dec9fcdSqs 		    " hxge_tx_err_evnts: "
26463dec9fcdSqs 		    " fatal error on channel %d cs 0x%llx\n",
26473dec9fcdSqs 		    channel, cs.value));
26483dec9fcdSqs 		status = hxge_txdma_fatal_err_recover(hxgep, channel,
26493dec9fcdSqs 		    tx_ring_p);
26503dec9fcdSqs 		if (status == HXGE_OK) {
26513dec9fcdSqs 			FM_SERVICE_RESTORED(hxgep);
26523dec9fcdSqs 		}
26533dec9fcdSqs 	}
26543dec9fcdSqs 
26553dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_tx_err_evnts"));
26563dec9fcdSqs 
26573dec9fcdSqs 	return (status);
26583dec9fcdSqs }
26593dec9fcdSqs 
26603dec9fcdSqs hxge_status_t
hxge_txdma_handle_sys_errors(p_hxge_t hxgep)26613dec9fcdSqs hxge_txdma_handle_sys_errors(p_hxge_t hxgep)
26623dec9fcdSqs {
26633dec9fcdSqs 	hpi_handle_t		handle;
26643dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
26653dec9fcdSqs 	tdc_fifo_err_stat_t	fifo_stat;
26663dec9fcdSqs 	hxge_tdc_sys_stats_t	*tdc_sys_stats;
26673dec9fcdSqs 
26683dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_handle_sys_errors"));
26693dec9fcdSqs 
26703dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
26713dec9fcdSqs 
26723dec9fcdSqs 	/*
26733dec9fcdSqs 	 * The FIFO is shared by all channels.
26743dec9fcdSqs 	 * Get the status of Reorder Buffer and Reorder Table Buffer Errors
26753dec9fcdSqs 	 */
26763dec9fcdSqs 	HXGE_REG_RD64(handle, TDC_FIFO_ERR_STAT, &fifo_stat.value);
26773dec9fcdSqs 
26783dec9fcdSqs 	/*
26793dec9fcdSqs 	 * Clear the error bits. Note that writing a 1 clears the bit. Writing
26803dec9fcdSqs 	 * a 0 does nothing.
26813dec9fcdSqs 	 */
26823dec9fcdSqs 	HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, fifo_stat.value);
26833dec9fcdSqs 
26843dec9fcdSqs 	tdc_sys_stats = &hxgep->statsp->tdc_sys_stats;
26853dec9fcdSqs 	if (fifo_stat.bits.reord_tbl_par_err) {
26863dec9fcdSqs 		tdc_sys_stats->reord_tbl_par_err++;
2687*c11cea93SToomas Soome 		HXGE_FM_REPORT_ERROR(hxgep, 0,
26883a109ad9SQiyan Sun - Sun Microsystems - San Diego United States 		    HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR);
26893a109ad9SQiyan Sun - Sun Microsystems - San Diego United States 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
26903a109ad9SQiyan Sun - Sun Microsystems - San Diego United States 		    "==> hxge_txdma_handle_sys_errors: fatal error: "
26913a109ad9SQiyan Sun - Sun Microsystems - San Diego United States 		    "reord_tbl_par_err"));
26923dec9fcdSqs 	}
26933dec9fcdSqs 
26943dec9fcdSqs 	if (fifo_stat.bits.reord_buf_ded_err) {
26953dec9fcdSqs 		tdc_sys_stats->reord_buf_ded_err++;
2696*c11cea93SToomas Soome 		HXGE_FM_REPORT_ERROR(hxgep, 0,
26973dec9fcdSqs 		    HXGE_FM_EREPORT_TDMC_REORD_BUF_DED);
26983dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
26993dec9fcdSqs 		    "==> hxge_txdma_handle_sys_errors: "
27003dec9fcdSqs 		    "fatal error: reord_buf_ded_err"));
27013dec9fcdSqs 	}
27023dec9fcdSqs 
27033dec9fcdSqs 	if (fifo_stat.bits.reord_buf_sec_err) {
27043dec9fcdSqs 		tdc_sys_stats->reord_buf_sec_err++;
27053dec9fcdSqs 		if (tdc_sys_stats->reord_buf_sec_err == 1)
27063dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
27073dec9fcdSqs 			    "==> hxge_txdma_handle_sys_errors: "
27083dec9fcdSqs 			    "reord_buf_sec_err"));
27093dec9fcdSqs 	}
27103dec9fcdSqs 
27113a109ad9SQiyan Sun - Sun Microsystems - San Diego United States 	if (fifo_stat.bits.reord_tbl_par_err ||
27123a109ad9SQiyan Sun - Sun Microsystems - San Diego United States 	    fifo_stat.bits.reord_buf_ded_err) {
27133dec9fcdSqs 		status = hxge_tx_port_fatal_err_recover(hxgep);
27143dec9fcdSqs 		if (status == HXGE_OK) {
27153dec9fcdSqs 			FM_SERVICE_RESTORED(hxgep);
27163dec9fcdSqs 		}
27173dec9fcdSqs 	}
27183dec9fcdSqs 
27193dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_handle_sys_errors"));
27203dec9fcdSqs 
27213dec9fcdSqs 	return (status);
27223dec9fcdSqs }
27233dec9fcdSqs 
/*
 * hxge_txdma_fatal_err_recover
 *
 * Recover a single TXDMA channel from a fatal error: stop the
 * channel, reclaim outstanding transmit descriptors, reset the
 * channel, zero the kick (tail) register, then reprogram and
 * re-enable it.  The ring lock is held across the entire sequence
 * so the transmit path cannot post new work mid-recovery.
 *
 * Returns HXGE_OK if the channel was restored; otherwise the status
 * of the failing step (the lock is released on all paths).
 */
static hxge_status_t
hxge_txdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	p_tx_mbox_t	tx_mbox_p;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover"));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovering from TxDMAChannel#%d error...", channel));

	/*
	 * Stop the dma channel waits for the stop done. If the stop done bit
	 * is not set, then create an error.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping txdma channel(%d)",
	    channel));
	/* Lock held from here through the enable below (or `fail'). */
	MUTEX_ENTER(&tx_ring_p->lock);
	rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
	if (rs != HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_fatal_err_recover (channel %d): "
		    "stop failed ", channel));

		goto fail;
	}
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming txdma channel(%d)",
	    channel));
	/* Free every descriptor the hardware has completed. */
	(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);

	/*
	 * Reset TXDMA channel
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "resetting txdma channel(%d)",
	    channel));
	if ((rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
	    HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_fatal_err_recover (channel %d)"
		    " reset channel failed 0x%x", channel, rs));

		goto fail;
	}
	/*
	 * Reset the tail (kick) register to 0. (Hardware will not reset it. Tx
	 * overflow fatal error if tail is not set to 0 after reset!
	 */
	TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);

	/*
	 * Restart TXDMA channel
	 *
	 * Initialize the TXDMA channel specific FZC control configurations.
	 * These FZC registers are pertaining to each TX channel (i.e. logical
	 * pages).
	 */
	tx_mbox_p = hxge_txdma_get_mbox(hxgep, channel);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "restarting txdma channel(%d)",
	    channel));
	status = hxge_init_fzc_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK)
		goto fail;

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = hxge_init_txdma_channel_event_mask(hxgep, channel,
	    &tx_ring_p->tx_evmask);
	if (status != HXGE_OK)
		goto fail;

	/* Software ring state must match the freshly reset hardware. */
	tx_ring_p->wr_index_wrap = B_FALSE;
	tx_ring_p->wr_index = 0;
	tx_ring_p->rd_index = 0;

	/*
	 * Load TXDMA descriptors, buffers, mailbox, initialise the DMA
	 * channels and enable each DMA channel.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "enabling txdma channel(%d)",
	    channel));
	status = hxge_enable_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	MUTEX_EXIT(&tx_ring_p->lock);
	if (status != HXGE_OK)
		goto fail;

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxDMAChannel#%d Restored", channel));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover"));

	return (HXGE_OK);

fail:
	/*
	 * NOTE(review): the enable-failure path above reaches here after
	 * the lock was already released; the MUTEX_EXIT below then appears
	 * to be a second release on that one path — confirm the MUTEX
	 * implementation tolerates it, or that the path is unreachable.
	 */
	MUTEX_EXIT(&tx_ring_p->lock);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "hxge_txdma_fatal_err_recover (channel %d): "
	    "failed to recover this txdma channel", channel));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));

	return (status);
}
28313dec9fcdSqs 
28323dec9fcdSqs static hxge_status_t
hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)28333dec9fcdSqs hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)
28343dec9fcdSqs {
28353dec9fcdSqs 	hpi_handle_t	handle;
28363dec9fcdSqs 	hpi_status_t	rs = HPI_SUCCESS;
28373dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
28383dec9fcdSqs 	p_tx_ring_t	*tx_desc_rings;
28393dec9fcdSqs 	p_tx_rings_t	tx_rings;
28403dec9fcdSqs 	p_tx_ring_t	tx_ring_p;
28413dec9fcdSqs 	int		i, ndmas;
28423dec9fcdSqs 	uint16_t	channel;
28433dec9fcdSqs 	block_reset_t	reset_reg;
28443dec9fcdSqs 
28453dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
28463dec9fcdSqs 	    "==> hxge_tx_port_fatal_err_recover"));
28473dec9fcdSqs 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
28483dec9fcdSqs 	    "Recovering from TxPort error..."));
28493dec9fcdSqs 
28503dec9fcdSqs 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
28513dec9fcdSqs 
28523dec9fcdSqs 	/* Reset TDC block from PEU for this fatal error */
28533dec9fcdSqs 	reset_reg.value = 0;
28543dec9fcdSqs 	reset_reg.bits.tdc_rst = 1;
28553dec9fcdSqs 	HXGE_REG_WR32(handle, BLOCK_RESET, reset_reg.value);
28563dec9fcdSqs 
28573dec9fcdSqs 	HXGE_DELAY(1000);
28583dec9fcdSqs 
28593dec9fcdSqs 	/*
28603dec9fcdSqs 	 * Stop the dma channel waits for the stop done. If the stop done bit
28613dec9fcdSqs 	 * is not set, then create an error.
28623dec9fcdSqs 	 */
28633dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping all DMA channels..."));
28643dec9fcdSqs 
28653dec9fcdSqs 	tx_rings = hxgep->tx_rings;
28663dec9fcdSqs 	tx_desc_rings = tx_rings->rings;
28673dec9fcdSqs 	ndmas = tx_rings->ndmas;
28683dec9fcdSqs 
28693dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
28703dec9fcdSqs 		if (tx_desc_rings[i] == NULL) {
28713dec9fcdSqs 			continue;
28723dec9fcdSqs 		}
28733dec9fcdSqs 		tx_ring_p = tx_rings->rings[i];
28743dec9fcdSqs 		MUTEX_ENTER(&tx_ring_p->lock);
28753dec9fcdSqs 	}
28763dec9fcdSqs 
28773dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
28783dec9fcdSqs 		if (tx_desc_rings[i] == NULL) {
28793dec9fcdSqs 			continue;
28803dec9fcdSqs 		}
28813dec9fcdSqs 		channel = tx_desc_rings[i]->tdc;
28823dec9fcdSqs 		tx_ring_p = tx_rings->rings[i];
28833dec9fcdSqs 		rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
28843dec9fcdSqs 		if (rs != HPI_SUCCESS) {
28853dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
28863dec9fcdSqs 			    "==> hxge_txdma_fatal_err_recover (channel %d): "
28873dec9fcdSqs 			    "stop failed ", channel));
28883dec9fcdSqs 
28893dec9fcdSqs 			goto fail;
28903dec9fcdSqs 		}
28913dec9fcdSqs 	}
28923dec9fcdSqs 
28933dec9fcdSqs 	/*
28943dec9fcdSqs 	 * Do reclaim on all of th DMAs.
28953dec9fcdSqs 	 */
28963dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming all DMA channels..."));
28973dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
28983dec9fcdSqs 		if (tx_desc_rings[i] == NULL) {
28993dec9fcdSqs 			continue;
29003dec9fcdSqs 		}
29013dec9fcdSqs 		tx_ring_p = tx_rings->rings[i];
29023dec9fcdSqs 		(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
29033dec9fcdSqs 	}
29043dec9fcdSqs 
29053dec9fcdSqs 	/* Restart the TDC */
29063dec9fcdSqs 	if ((status = hxge_txdma_hw_start(hxgep)) != HXGE_OK)
29073dec9fcdSqs 		goto fail;
29083dec9fcdSqs 
29093dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
29103dec9fcdSqs 		if (tx_desc_rings[i] == NULL) {
29113dec9fcdSqs 			continue;
29123dec9fcdSqs 		}
29133dec9fcdSqs 		tx_ring_p = tx_rings->rings[i];
29143dec9fcdSqs 		MUTEX_EXIT(&tx_ring_p->lock);
29153dec9fcdSqs 	}
29163dec9fcdSqs 
29173dec9fcdSqs 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
29183dec9fcdSqs 	    "Recovery Successful, TxPort Restored"));
29193dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
29203dec9fcdSqs 	    "<== hxge_tx_port_fatal_err_recover"));
29213dec9fcdSqs 	return (HXGE_OK);
29223dec9fcdSqs 
29233dec9fcdSqs fail:
29243dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
29253dec9fcdSqs 		if (tx_desc_rings[i] == NULL) {
29263dec9fcdSqs 			continue;
29273dec9fcdSqs 		}
29283dec9fcdSqs 		tx_ring_p = tx_rings->rings[i];
29293dec9fcdSqs 		MUTEX_EXIT(&tx_ring_p->lock);
29303dec9fcdSqs 	}
29313dec9fcdSqs 
29323dec9fcdSqs 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
29333dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
29343dec9fcdSqs 	    "hxge_txdma_fatal_err_recover (channel %d): "
29353dec9fcdSqs 	    "failed to recover this txdma channel"));
29363dec9fcdSqs 
29373dec9fcdSqs 	return (status);
29383dec9fcdSqs }
2939