144961713Sgirish /*
244961713Sgirish  * CDDL HEADER START
344961713Sgirish  *
444961713Sgirish  * The contents of this file are subject to the terms of the
544961713Sgirish  * Common Development and Distribution License (the "License").
644961713Sgirish  * You may not use this file except in compliance with the License.
744961713Sgirish  *
844961713Sgirish  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
944961713Sgirish  * or http://www.opensolaris.org/os/licensing.
1044961713Sgirish  * See the License for the specific language governing permissions
1144961713Sgirish  * and limitations under the License.
1244961713Sgirish  *
1344961713Sgirish  * When distributing Covered Code, include this CDDL HEADER in each
1444961713Sgirish  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1544961713Sgirish  * If applicable, add the following below this CDDL HEADER, with the
1644961713Sgirish  * fields enclosed by brackets "[]" replaced with your own identifying
1744961713Sgirish  * information: Portions Copyright [yyyy] [name of copyright owner]
1844961713Sgirish  *
1944961713Sgirish  * CDDL HEADER END
2044961713Sgirish  */
2144961713Sgirish /*
22*3e82a89eSmisaki  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
2344961713Sgirish  * Use is subject to license terms.
2444961713Sgirish  */
2544961713Sgirish 
2644961713Sgirish #pragma ident	"%Z%%M%	%I%	%E% SMI"
2744961713Sgirish 
2844961713Sgirish #include <sys/nxge/nxge_impl.h>
2944961713Sgirish #include <sys/nxge/nxge_rxdma.h>
3044961713Sgirish 
3144961713Sgirish #define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
3244961713Sgirish 	(rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid)
3344961713Sgirish #define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
3444961713Sgirish 	(rdc + nxgep->pt_config.hw_config.start_rdc)
3544961713Sgirish 
3644961713Sgirish /*
3744961713Sgirish  * Globals: tunable parameters (/etc/system or adb)
3844961713Sgirish  *
3944961713Sgirish  */
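/*
 * For example (values are illustrative only), the default RBR size
 * could be overridden with a line such as the following in /etc/system:
 *
 *	set nxge:nxge_rbr_size = 1024
 */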
4044961713Sgirish extern uint32_t nxge_rbr_size;
4144961713Sgirish extern uint32_t nxge_rcr_size;
4244961713Sgirish extern uint32_t	nxge_rbr_spare_size;
4344961713Sgirish 
4444961713Sgirish extern uint32_t nxge_mblks_pending;
4544961713Sgirish 
4644961713Sgirish /*
4744961713Sgirish  * Tunable to reduce the amount of time spent in the
4844961713Sgirish  * ISR doing Rx Processing.
4944961713Sgirish  */
5044961713Sgirish extern uint32_t nxge_max_rx_pkts;
5144961713Sgirish boolean_t nxge_jumbo_enable;
5244961713Sgirish 
5344961713Sgirish /*
5444961713Sgirish  * Tunables to manage the receive buffer blocks.
5544961713Sgirish  *
5644961713Sgirish  * nxge_rx_threshold_hi: copy all buffers.
5744961713Sgirish  * nxge_rx_buf_size_type: receive buffer block size type.
5844961713Sgirish  * nxge_rx_threshold_lo: copy only up to the tunable block size type.
5944961713Sgirish  */
6044961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
6144961713Sgirish extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
6244961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
6344961713Sgirish 
6444961713Sgirish static nxge_status_t nxge_map_rxdma(p_nxge_t);
6544961713Sgirish static void nxge_unmap_rxdma(p_nxge_t);
6644961713Sgirish 
6744961713Sgirish static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
6844961713Sgirish static void nxge_rxdma_hw_stop_common(p_nxge_t);
6944961713Sgirish 
7044961713Sgirish static nxge_status_t nxge_rxdma_hw_start(p_nxge_t);
7144961713Sgirish static void nxge_rxdma_hw_stop(p_nxge_t);
7244961713Sgirish 
7344961713Sgirish static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
7444961713Sgirish     p_nxge_dma_common_t *,  p_rx_rbr_ring_t *,
7544961713Sgirish     uint32_t,
7644961713Sgirish     p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
7744961713Sgirish     p_rx_mbox_t *);
7844961713Sgirish static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
7944961713Sgirish     p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
8044961713Sgirish 
8144961713Sgirish static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
8244961713Sgirish     uint16_t,
8344961713Sgirish     p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
8444961713Sgirish     p_rx_rcr_ring_t *, p_rx_mbox_t *);
8544961713Sgirish static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
8644961713Sgirish     p_rx_rcr_ring_t, p_rx_mbox_t);
8744961713Sgirish 
8844961713Sgirish static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
8944961713Sgirish     uint16_t,
9044961713Sgirish     p_nxge_dma_common_t *,
9144961713Sgirish     p_rx_rbr_ring_t *, uint32_t);
9244961713Sgirish static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
9344961713Sgirish     p_rx_rbr_ring_t);
9444961713Sgirish 
9544961713Sgirish static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
9644961713Sgirish     p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
9744961713Sgirish static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
9844961713Sgirish 
9944961713Sgirish mblk_t *
10044961713Sgirish nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t,
10144961713Sgirish     p_rx_rcr_ring_t *, rx_dma_ctl_stat_t);
10244961713Sgirish 
10344961713Sgirish static void nxge_receive_packet(p_nxge_t,
10444961713Sgirish 	p_rx_rcr_ring_t,
10544961713Sgirish 	p_rcr_entry_t,
10644961713Sgirish 	boolean_t *,
10744961713Sgirish 	mblk_t **, mblk_t **);
10844961713Sgirish 
10944961713Sgirish nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
11044961713Sgirish 
11144961713Sgirish static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
11244961713Sgirish static void nxge_freeb(p_rx_msg_t);
11344961713Sgirish static void nxge_rx_pkts_vring(p_nxge_t, uint_t,
11444961713Sgirish     p_nxge_ldv_t, rx_dma_ctl_stat_t);
11544961713Sgirish static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t,
11644961713Sgirish 				p_nxge_ldv_t, rx_dma_ctl_stat_t);
11744961713Sgirish 
11844961713Sgirish static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
11944961713Sgirish 				uint32_t, uint32_t);
12044961713Sgirish 
12144961713Sgirish static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
12244961713Sgirish     p_rx_rbr_ring_t);
12344961713Sgirish 
12444961713Sgirish 
12544961713Sgirish static nxge_status_t
12644961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
12744961713Sgirish 
12844961713Sgirish nxge_status_t
12944961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t);
13044961713Sgirish 
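/*
 * nxge_init_rxdma_channels
 *
 *	Map the receive DMA resources (buffer blocks, descriptor rings
 *	and mailboxes), then start the per-port (common) receive DMA
 *	hardware followed by each individual channel.  On failure the
 *	mapped resources are released and the error status is returned.
 */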
13144961713Sgirish nxge_status_t
13244961713Sgirish nxge_init_rxdma_channels(p_nxge_t nxgep)
13344961713Sgirish {
13444961713Sgirish 	nxge_status_t	status = NXGE_OK;
13544961713Sgirish 
13644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
13744961713Sgirish 
13844961713Sgirish 	status = nxge_map_rxdma(nxgep);
13944961713Sgirish 	if (status != NXGE_OK) {
14044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
14144961713Sgirish 			"<== nxge_init_rxdma: status 0x%x", status));
14244961713Sgirish 		return (status);
14344961713Sgirish 	}
14444961713Sgirish 
14544961713Sgirish 	status = nxge_rxdma_hw_start_common(nxgep);
14644961713Sgirish 	if (status != NXGE_OK) {
14744961713Sgirish 		nxge_unmap_rxdma(nxgep);
		return (status);
14844961713Sgirish 	}
14944961713Sgirish 
15044961713Sgirish 	status = nxge_rxdma_hw_start(nxgep);
15144961713Sgirish 	if (status != NXGE_OK) {
15244961713Sgirish 		nxge_unmap_rxdma(nxgep);
15344961713Sgirish 	}
15444961713Sgirish 
15544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
15644961713Sgirish 		"<== nxge_init_rxdma_channels: status 0x%x", status));
15744961713Sgirish 
15844961713Sgirish 	return (status);
15944961713Sgirish }
16044961713Sgirish 
16144961713Sgirish void
16244961713Sgirish nxge_uninit_rxdma_channels(p_nxge_t nxgep)
16344961713Sgirish {
16444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
16544961713Sgirish 
16644961713Sgirish 	nxge_rxdma_hw_stop(nxgep);
16744961713Sgirish 	nxge_rxdma_hw_stop_common(nxgep);
16844961713Sgirish 	nxge_unmap_rxdma(nxgep);
16944961713Sgirish 
17044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
17144961713Sgirish 		"<== nxge_uninit_rxdma_channels"));
17244961713Sgirish }
17344961713Sgirish 
17444961713Sgirish nxge_status_t
17544961713Sgirish nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
17644961713Sgirish {
17744961713Sgirish 	npi_handle_t		handle;
17844961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
17944961713Sgirish 	nxge_status_t		status = NXGE_OK;
18044961713Sgirish 
18144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
18244961713Sgirish 
18344961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
18444961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
18544961713Sgirish 
18644961713Sgirish 	if (rs != NPI_SUCCESS) {
18744961713Sgirish 		status = NXGE_ERROR | rs;
18844961713Sgirish 	}
18944961713Sgirish 
19044961713Sgirish 	return (status);
19144961713Sgirish }
19244961713Sgirish 
19344961713Sgirish void
19444961713Sgirish nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
19544961713Sgirish {
19644961713Sgirish 	int			i, ndmas;
19744961713Sgirish 	uint16_t		channel;
19844961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
19944961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
20044961713Sgirish 	npi_handle_t		handle;
20144961713Sgirish 
20244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
20344961713Sgirish 
20444961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
20544961713Sgirish 	(void) npi_rxdma_dump_fzc_regs(handle);
20644961713Sgirish 
20744961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
20844961713Sgirish 	if (rx_rbr_rings == NULL) {
20944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
21044961713Sgirish 			"<== nxge_rxdma_regs_dump_channels: "
21144961713Sgirish 			"NULL ring pointer"));
21244961713Sgirish 		return;
21344961713Sgirish 	}
21444961713Sgirish 	if (rx_rbr_rings->rbr_rings == NULL) {
21544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
21644961713Sgirish 			"<== nxge_rxdma_regs_dump_channels: "
21744961713Sgirish 			" NULL rbr rings pointer"));
21844961713Sgirish 		return;
21944961713Sgirish 	}
22044961713Sgirish 
22144961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
22244961713Sgirish 	if (!ndmas) {
22344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
22444961713Sgirish 			"<== nxge_rxdma_regs_dump_channels: no channel"));
22544961713Sgirish 		return;
22644961713Sgirish 	}
22744961713Sgirish 
22844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
22944961713Sgirish 		"==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas));
23044961713Sgirish 
23144961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
23244961713Sgirish 	for (i = 0; i < ndmas; i++) {
23344961713Sgirish 		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
23444961713Sgirish 			continue;
23544961713Sgirish 		}
23644961713Sgirish 		channel = rbr_rings[i]->rdc;
23744961713Sgirish 		(void) nxge_dump_rxdma_channel(nxgep, channel);
23844961713Sgirish 	}
23944961713Sgirish 
24044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
24144961713Sgirish 
24244961713Sgirish }
24344961713Sgirish 
24444961713Sgirish nxge_status_t
24544961713Sgirish nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
24644961713Sgirish {
24744961713Sgirish 	npi_handle_t		handle;
24844961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
24944961713Sgirish 	nxge_status_t		status = NXGE_OK;
25044961713Sgirish 
25144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
25244961713Sgirish 
25344961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
25444961713Sgirish 	rs = npi_rxdma_dump_rdc_regs(handle, channel);
25544961713Sgirish 
25644961713Sgirish 	if (rs != NPI_SUCCESS) {
25744961713Sgirish 		status = NXGE_ERROR | rs;
25844961713Sgirish 	}
25944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
26044961713Sgirish 	return (status);
26144961713Sgirish }
26244961713Sgirish 
26344961713Sgirish nxge_status_t
26444961713Sgirish nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
26544961713Sgirish     p_rx_dma_ent_msk_t mask_p)
26644961713Sgirish {
26744961713Sgirish 	npi_handle_t		handle;
26844961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
26944961713Sgirish 	nxge_status_t		status = NXGE_OK;
27044961713Sgirish 
27144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
27244961713Sgirish 		"==> nxge_init_rxdma_channel_event_mask"));
27344961713Sgirish 
27444961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
27544961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
27644961713Sgirish 	if (rs != NPI_SUCCESS) {
27744961713Sgirish 		status = NXGE_ERROR | rs;
27844961713Sgirish 	}
27944961713Sgirish 
28044961713Sgirish 	return (status);
28144961713Sgirish }
28244961713Sgirish 
28344961713Sgirish nxge_status_t
28444961713Sgirish nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
28544961713Sgirish     p_rx_dma_ctl_stat_t cs_p)
28644961713Sgirish {
28744961713Sgirish 	npi_handle_t		handle;
28844961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
28944961713Sgirish 	nxge_status_t		status = NXGE_OK;
29044961713Sgirish 
29144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
29244961713Sgirish 		"==> nxge_init_rxdma_channel_cntl_stat"));
29344961713Sgirish 
29444961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
29544961713Sgirish 	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
29644961713Sgirish 
29744961713Sgirish 	if (rs != NPI_SUCCESS) {
29844961713Sgirish 		status = NXGE_ERROR | rs;
29944961713Sgirish 	}
30044961713Sgirish 
30144961713Sgirish 	return (status);
30244961713Sgirish }
30344961713Sgirish 
30444961713Sgirish nxge_status_t
30544961713Sgirish nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp,
30644961713Sgirish 				    uint8_t rdc)
30744961713Sgirish {
30844961713Sgirish 	npi_handle_t		handle;
30944961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
31044961713Sgirish 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
31144961713Sgirish 	p_nxge_rdc_grp_t	rdc_grp_p;
31244961713Sgirish 	uint8_t actual_rdcgrp, actual_rdc;
31344961713Sgirish 
31444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
31544961713Sgirish 			    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
31644961713Sgirish 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
31744961713Sgirish 
31844961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
31944961713Sgirish 
32044961713Sgirish 	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
32144961713Sgirish 	rdc_grp_p->rdc[0] = rdc;
32244961713Sgirish 
32344961713Sgirish 	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
32444961713Sgirish 	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
32544961713Sgirish 
32644961713Sgirish 	rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp,
32744961713Sgirish 							    actual_rdc);
32844961713Sgirish 
32944961713Sgirish 	if (rs != NPI_SUCCESS) {
33044961713Sgirish 		return (NXGE_ERROR | rs);
33144961713Sgirish 	}
33244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
33344961713Sgirish 			    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
33444961713Sgirish 	return (NXGE_OK);
33544961713Sgirish }
33644961713Sgirish 
33744961713Sgirish nxge_status_t
33844961713Sgirish nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
33944961713Sgirish {
34044961713Sgirish 	npi_handle_t		handle;
34144961713Sgirish 
34244961713Sgirish 	uint8_t actual_rdc;
34344961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
34444961713Sgirish 
34544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
34644961713Sgirish 			    " ==> nxge_rxdma_cfg_port_default_rdc"));
34744961713Sgirish 
34844961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
34944961713Sgirish 	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
35044961713Sgirish 	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
35144961713Sgirish 
35244961713Sgirish 
35344961713Sgirish 	if (rs != NPI_SUCCESS) {
35444961713Sgirish 		return (NXGE_ERROR | rs);
35544961713Sgirish 	}
35644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
35744961713Sgirish 			    " <== nxge_rxdma_cfg_port_default_rdc"));
35844961713Sgirish 
35944961713Sgirish 	return (NXGE_OK);
36044961713Sgirish }
36144961713Sgirish 
36244961713Sgirish nxge_status_t
36344961713Sgirish nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
36444961713Sgirish 				    uint16_t pkts)
36544961713Sgirish {
36644961713Sgirish 	npi_status_t	rs = NPI_SUCCESS;
36744961713Sgirish 	npi_handle_t	handle;
36844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
36944961713Sgirish 			    " ==> nxge_rxdma_cfg_rcr_threshold"));
37044961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
37144961713Sgirish 
37244961713Sgirish 	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
37344961713Sgirish 
37444961713Sgirish 	if (rs != NPI_SUCCESS) {
37544961713Sgirish 		return (NXGE_ERROR | rs);
37644961713Sgirish 	}
37744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
37844961713Sgirish 	return (NXGE_OK);
37944961713Sgirish }
38044961713Sgirish 
38144961713Sgirish nxge_status_t
38244961713Sgirish nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
38344961713Sgirish 			    uint16_t tout, uint8_t enable)
38444961713Sgirish {
38544961713Sgirish 	npi_status_t	rs = NPI_SUCCESS;
38644961713Sgirish 	npi_handle_t	handle;
38744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
38844961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
38944961713Sgirish 	if (enable == 0) {
39044961713Sgirish 		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
39144961713Sgirish 	} else {
39244961713Sgirish 		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
39344961713Sgirish 							    tout);
39444961713Sgirish 	}
39544961713Sgirish 
39644961713Sgirish 	if (rs != NPI_SUCCESS) {
39744961713Sgirish 		return (NXGE_ERROR | rs);
39844961713Sgirish 	}
39944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
40044961713Sgirish 	return (NXGE_OK);
40144961713Sgirish }
40244961713Sgirish 
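/*
 * nxge_enable_rxdma_channel
 *
 *	Program one RDC with the configuration composed at init time:
 *	mailbox address, RBR and RCR base addresses and lengths, the
 *	three packet buffer block sizes, and the RCR threshold and
 *	timeout.  The DMA is then enabled, the RBR is kicked with the
 *	number of posted blocks and the RBR empty bit is cleared.
 */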
40344961713Sgirish nxge_status_t
40444961713Sgirish nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
40544961713Sgirish     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
40644961713Sgirish {
40744961713Sgirish 	npi_handle_t		handle;
40844961713Sgirish 	rdc_desc_cfg_t 		rdc_desc;
40944961713Sgirish 	p_rcrcfig_b_t		cfgb_p;
41044961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
41144961713Sgirish 
41244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
41344961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
41444961713Sgirish 	/*
41544961713Sgirish 	 * Use configuration data composed at init time.
41644961713Sgirish 	 * Write to hardware the receive ring configurations.
41744961713Sgirish 	 */
41844961713Sgirish 	rdc_desc.mbox_enable = 1;
41944961713Sgirish 	rdc_desc.mbox_addr = mbox_p->mbox_addr;
42044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
42144961713Sgirish 		"==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
42244961713Sgirish 		mbox_p->mbox_addr, rdc_desc.mbox_addr));
42344961713Sgirish 
42444961713Sgirish 	rdc_desc.rbr_len = rbr_p->rbb_max;
42544961713Sgirish 	rdc_desc.rbr_addr = rbr_p->rbr_addr;
42644961713Sgirish 
42744961713Sgirish 	switch (nxgep->rx_bksize_code) {
42844961713Sgirish 	case RBR_BKSIZE_4K:
42944961713Sgirish 		rdc_desc.page_size = SIZE_4KB;
43044961713Sgirish 		break;
43144961713Sgirish 	case RBR_BKSIZE_8K:
43244961713Sgirish 		rdc_desc.page_size = SIZE_8KB;
43344961713Sgirish 		break;
43444961713Sgirish 	case RBR_BKSIZE_16K:
43544961713Sgirish 		rdc_desc.page_size = SIZE_16KB;
43644961713Sgirish 		break;
43744961713Sgirish 	case RBR_BKSIZE_32K:
43844961713Sgirish 		rdc_desc.page_size = SIZE_32KB;
43944961713Sgirish 		break;
44044961713Sgirish 	}
44144961713Sgirish 
44244961713Sgirish 	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
44344961713Sgirish 	rdc_desc.valid0 = 1;
44444961713Sgirish 
44544961713Sgirish 	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
44644961713Sgirish 	rdc_desc.valid1 = 1;
44744961713Sgirish 
44844961713Sgirish 	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
44944961713Sgirish 	rdc_desc.valid2 = 1;
45044961713Sgirish 
45144961713Sgirish 	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
45244961713Sgirish 	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
45344961713Sgirish 
45444961713Sgirish 	rdc_desc.rcr_len = rcr_p->comp_size;
45544961713Sgirish 	rdc_desc.rcr_addr = rcr_p->rcr_addr;
45644961713Sgirish 
45744961713Sgirish 	cfgb_p = &(rcr_p->rcr_cfgb);
45844961713Sgirish 	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
45944961713Sgirish 	rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
46044961713Sgirish 	rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
46144961713Sgirish 
46244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
46344961713Sgirish 		"rbr_len qlen %d pagesize code %d rcr_len %d",
46444961713Sgirish 		rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
46544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
46644961713Sgirish 		"size 0 %d size 1 %d size 2 %d",
46744961713Sgirish 		rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
46844961713Sgirish 		rbr_p->npi_pkt_buf_size2));
46944961713Sgirish 
47044961713Sgirish 	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
47144961713Sgirish 	if (rs != NPI_SUCCESS) {
47244961713Sgirish 		return (NXGE_ERROR | rs);
47344961713Sgirish 	}
47444961713Sgirish 
47544961713Sgirish 	/*
47644961713Sgirish 	 * Enable the timeout and threshold.
47744961713Sgirish 	 */
47844961713Sgirish 	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
47944961713Sgirish 			rdc_desc.rcr_threshold);
48044961713Sgirish 	if (rs != NPI_SUCCESS) {
48144961713Sgirish 		return (NXGE_ERROR | rs);
48244961713Sgirish 	}
48344961713Sgirish 
48444961713Sgirish 	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
48544961713Sgirish 			rdc_desc.rcr_timeout);
48644961713Sgirish 	if (rs != NPI_SUCCESS) {
48744961713Sgirish 		return (NXGE_ERROR | rs);
48844961713Sgirish 	}
48944961713Sgirish 
49044961713Sgirish 	/* Enable the DMA */
49144961713Sgirish 	rs = npi_rxdma_cfg_rdc_enable(handle, channel);
49244961713Sgirish 	if (rs != NPI_SUCCESS) {
49344961713Sgirish 		return (NXGE_ERROR | rs);
49444961713Sgirish 	}
49544961713Sgirish 
49644961713Sgirish 	/* Kick the DMA engine. */
49744961713Sgirish 	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
49844961713Sgirish 	/* Clear the rbr empty bit */
49944961713Sgirish 	(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
50044961713Sgirish 
50144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
50244961713Sgirish 
50344961713Sgirish 	return (NXGE_OK);
50444961713Sgirish }
50544961713Sgirish 
50644961713Sgirish nxge_status_t
50744961713Sgirish nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
50844961713Sgirish {
50944961713Sgirish 	npi_handle_t		handle;
51044961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
51144961713Sgirish 
51244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
51344961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
51444961713Sgirish 
51544961713Sgirish 	/* disable the DMA */
51644961713Sgirish 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
51744961713Sgirish 	if (rs != NPI_SUCCESS) {
51844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
51944961713Sgirish 			"<== nxge_disable_rxdma_channel:failed (0x%x)",
52044961713Sgirish 			rs));
52144961713Sgirish 		return (NXGE_ERROR | rs);
52244961713Sgirish 	}
52344961713Sgirish 
52444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
52544961713Sgirish 	return (NXGE_OK);
52644961713Sgirish }
52744961713Sgirish 
52844961713Sgirish nxge_status_t
52944961713Sgirish nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
53044961713Sgirish {
53144961713Sgirish 	npi_handle_t		handle;
53244961713Sgirish 	nxge_status_t		status = NXGE_OK;
53344961713Sgirish 
53444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
53544961713Sgirish 		"==> nxge_rxdma_channel_rcrflush"));
53644961713Sgirish 
53744961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
53844961713Sgirish 	npi_rxdma_rdc_rcr_flush(handle, channel);
53944961713Sgirish 
54044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
54144961713Sgirish 		"<== nxge_rxdma_channel_rcrflush"));
54244961713Sgirish 	return (status);
54344961713Sgirish 
54444961713Sgirish }
54544961713Sgirish 
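/*
 * Helpers for the binary search in nxge_rxbuf_pp_to_vp().  MID_INDEX
 * computes the midpoint of the current search window.  The TO_LEFT and
 * TO_RIGHT codes are summed (base_side + end_side) to classify the
 * anchor chunk: IN_MIDDLE means the address falls within the chunk,
 * BOTH_RIGHT means it lies above the chunk (search to the right) and
 * BOTH_LEFT means it lies below the chunk (search to the left).
 */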
54644961713Sgirish #define	MID_INDEX(l, r) (((r) + (l) + 1) >> 1)
54744961713Sgirish 
54844961713Sgirish #define	TO_LEFT -1
54944961713Sgirish #define	TO_RIGHT 1
55044961713Sgirish #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
55144961713Sgirish #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
55244961713Sgirish #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
55344961713Sgirish #define	NO_HINT 0xffffffff
55444961713Sgirish 
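/*
 * nxge_rxbuf_pp_to_vp
 *
 *	Translate the DVMA (I/O) packet buffer address reported in an
 *	RCR entry into the corresponding kernel virtual address, the
 *	offset within the receive block and the message index.  The
 *	lookup first tries the per-size hint and then falls back to a
 *	binary search of the sorted buffer block table.
 */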
55544961713Sgirish /*ARGSUSED*/
55644961713Sgirish nxge_status_t
55744961713Sgirish nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
558a3c5bd6dSspeer 	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
559a3c5bd6dSspeer 	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
56044961713Sgirish {
56144961713Sgirish 	int			bufsize;
56244961713Sgirish 	uint64_t		pktbuf_pp;
56344961713Sgirish 	uint64_t 		dvma_addr;
56444961713Sgirish 	rxring_info_t 		*ring_info;
56544961713Sgirish 	int 			base_side, end_side;
56644961713Sgirish 	int 			r_index, l_index, anchor_index;
56744961713Sgirish 	int 			found, search_done;
56844961713Sgirish 	uint32_t offset, chunk_size, block_size, page_size_mask;
56944961713Sgirish 	uint32_t chunk_index, block_index, total_index;
57044961713Sgirish 	int 			max_iterations, iteration;
57144961713Sgirish 	rxbuf_index_info_t 	*bufinfo;
57244961713Sgirish 
57344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
57444961713Sgirish 
57544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
57644961713Sgirish 		"==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
57744961713Sgirish 		pkt_buf_addr_pp,
57844961713Sgirish 		pktbufsz_type));
579adfcba55Sjoycey #if defined(__i386)
580adfcba55Sjoycey 	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
581adfcba55Sjoycey #else
58244961713Sgirish 	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
583adfcba55Sjoycey #endif
58444961713Sgirish 
58544961713Sgirish 	switch (pktbufsz_type) {
58644961713Sgirish 	case 0:
58744961713Sgirish 		bufsize = rbr_p->pkt_buf_size0;
58844961713Sgirish 		break;
58944961713Sgirish 	case 1:
59044961713Sgirish 		bufsize = rbr_p->pkt_buf_size1;
59144961713Sgirish 		break;
59244961713Sgirish 	case 2:
59344961713Sgirish 		bufsize = rbr_p->pkt_buf_size2;
59444961713Sgirish 		break;
59544961713Sgirish 	case RCR_SINGLE_BLOCK:
59644961713Sgirish 		bufsize = 0;
59744961713Sgirish 		anchor_index = 0;
59844961713Sgirish 		break;
59944961713Sgirish 	default:
60044961713Sgirish 		return (NXGE_ERROR);
60144961713Sgirish 	}
60244961713Sgirish 
60344961713Sgirish 	if (rbr_p->num_blocks == 1) {
60444961713Sgirish 		anchor_index = 0;
60544961713Sgirish 		ring_info = rbr_p->ring_info;
60644961713Sgirish 		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
60744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
60844961713Sgirish 			"==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
60944961713Sgirish 			"buf_pp $%p btype %d anchor_index %d "
61044961713Sgirish 			"bufinfo $%p",
61144961713Sgirish 			pkt_buf_addr_pp,
61244961713Sgirish 			pktbufsz_type,
61344961713Sgirish 			anchor_index,
61444961713Sgirish 			bufinfo));
61544961713Sgirish 
61644961713Sgirish 		goto found_index;
61744961713Sgirish 	}
61844961713Sgirish 
61944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
62044961713Sgirish 		"==> nxge_rxbuf_pp_to_vp: "
62144961713Sgirish 		"buf_pp $%p btype %d  anchor_index %d",
62244961713Sgirish 		pkt_buf_addr_pp,
62344961713Sgirish 		pktbufsz_type,
62444961713Sgirish 		anchor_index));
62544961713Sgirish 
62644961713Sgirish 	ring_info = rbr_p->ring_info;
62744961713Sgirish 	found = B_FALSE;
62844961713Sgirish 	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
62944961713Sgirish 	iteration = 0;
63044961713Sgirish 	max_iterations = ring_info->max_iterations;
63144961713Sgirish 		/*
632a3c5bd6dSspeer 		 * First check if this block has been seen
63344961713Sgirish 		 * recently. This is indicated by a hint which
63444961713Sgirish 		 * is initialized when the first buffer of the block
63544961713Sgirish 		 * is seen. The hint is reset when the last buffer of
63644961713Sgirish 		 * the block has been processed.
63744961713Sgirish 		 * As three block sizes are supported, three hints
63844961713Sgirish 		 * are kept. The idea behind the hints is that once
63944961713Sgirish 		 * the hardware  uses a block for a buffer  of that
64044961713Sgirish 		 * size, it will use it exclusively for that size
64144961713Sgirish 		 * and will use it until it is exhausted. It is assumed
64244961713Sgirish 		 * that there would be a single block being used for the same
64344961713Sgirish 		 * buffer sizes at any given time.
64444961713Sgirish 		 */
64544961713Sgirish 	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
64644961713Sgirish 		anchor_index = ring_info->hint[pktbufsz_type];
64744961713Sgirish 		dvma_addr =  bufinfo[anchor_index].dvma_addr;
64844961713Sgirish 		chunk_size = bufinfo[anchor_index].buf_size;
64944961713Sgirish 		if ((pktbuf_pp >= dvma_addr) &&
65044961713Sgirish 			(pktbuf_pp < (dvma_addr + chunk_size))) {
65144961713Sgirish 			found = B_TRUE;
65244961713Sgirish 				/*
65344961713Sgirish 				 * Check if this is the last buffer in the block.
65444961713Sgirish 				 * If so, then reset the hint for this size.
65544961713Sgirish 				 */
65644961713Sgirish 
65744961713Sgirish 			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
65844961713Sgirish 				ring_info->hint[pktbufsz_type] = NO_HINT;
65944961713Sgirish 		}
66044961713Sgirish 	}
66144961713Sgirish 
66244961713Sgirish 	if (found == B_FALSE) {
66344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
66444961713Sgirish 			"==> nxge_rxbuf_pp_to_vp: (!found)"
66544961713Sgirish 			"buf_pp $%p btype %d anchor_index %d",
66644961713Sgirish 			pkt_buf_addr_pp,
66744961713Sgirish 			pktbufsz_type,
66844961713Sgirish 			anchor_index));
66944961713Sgirish 
67044961713Sgirish 			/*
67144961713Sgirish 			 * This is the first buffer of the block of this
67244961713Sgirish 			 * size. Need to search the whole information
67344961713Sgirish 			 * array.
67444961713Sgirish 			 * The search uses a binary search over the sorted
67544961713Sgirish 			 * array. It assumes that the information is
67644961713Sgirish 			 * already sorted in increasing order:
67744961713Sgirish 			 * info[0] < info[1] < info[2]  .... < info[n-1]
67844961713Sgirish 			 * where n is the size of the information array
67944961713Sgirish 			 */
68044961713Sgirish 		r_index = rbr_p->num_blocks - 1;
68144961713Sgirish 		l_index = 0;
68244961713Sgirish 		search_done = B_FALSE;
68344961713Sgirish 		anchor_index = MID_INDEX(r_index, l_index);
68444961713Sgirish 		while (search_done == B_FALSE) {
68544961713Sgirish 			if ((r_index == l_index) ||
68644961713Sgirish 				(iteration >= max_iterations))
68744961713Sgirish 				search_done = B_TRUE;
68844961713Sgirish 			end_side = TO_RIGHT; /* to the right */
68944961713Sgirish 			base_side = TO_LEFT; /* to the left */
69044961713Sgirish 			/* read the DVMA address information and sort it */
69144961713Sgirish 			dvma_addr =  bufinfo[anchor_index].dvma_addr;
69244961713Sgirish 			chunk_size = bufinfo[anchor_index].buf_size;
69344961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
69444961713Sgirish 				"==> nxge_rxbuf_pp_to_vp: (searching)"
69544961713Sgirish 				"buf_pp $%p btype %d "
69644961713Sgirish 				"anchor_index %d chunk_size %d dvmaaddr $%p",
69744961713Sgirish 				pkt_buf_addr_pp,
69844961713Sgirish 				pktbufsz_type,
69944961713Sgirish 				anchor_index,
70044961713Sgirish 				chunk_size,
70144961713Sgirish 				dvma_addr));
70244961713Sgirish 
70344961713Sgirish 			if (pktbuf_pp >= dvma_addr)
70444961713Sgirish 				base_side = TO_RIGHT; /* to the right */
70544961713Sgirish 			if (pktbuf_pp < (dvma_addr + chunk_size))
70644961713Sgirish 				end_side = TO_LEFT; /* to the left */
70744961713Sgirish 
70844961713Sgirish 			switch (base_side + end_side) {
70944961713Sgirish 				case IN_MIDDLE:
71044961713Sgirish 					/* found */
71144961713Sgirish 					found = B_TRUE;
71244961713Sgirish 					search_done = B_TRUE;
71344961713Sgirish 					if ((pktbuf_pp + bufsize) <
71444961713Sgirish 						(dvma_addr + chunk_size))
71544961713Sgirish 						ring_info->hint[pktbufsz_type] =
71644961713Sgirish 						bufinfo[anchor_index].buf_index;
71744961713Sgirish 					break;
71844961713Sgirish 				case BOTH_RIGHT:
71944961713Sgirish 						/* not found: go to the right */
72044961713Sgirish 					l_index = anchor_index + 1;
72144961713Sgirish 					anchor_index =
72244961713Sgirish 						MID_INDEX(r_index, l_index);
72344961713Sgirish 					break;
72444961713Sgirish 
72544961713Sgirish 				case  BOTH_LEFT:
72644961713Sgirish 						/* not found: go to the left */
72744961713Sgirish 					r_index = anchor_index - 1;
72844961713Sgirish 					anchor_index = MID_INDEX(r_index,
72944961713Sgirish 						l_index);
73044961713Sgirish 					break;
73144961713Sgirish 				default: /* should not come here */
73244961713Sgirish 					return (NXGE_ERROR);
73344961713Sgirish 			}
73444961713Sgirish 			iteration++;
73544961713Sgirish 		}
73644961713Sgirish 
73744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
73844961713Sgirish 			"==> nxge_rxbuf_pp_to_vp: (search done)"
73944961713Sgirish 			"buf_pp $%p btype %d anchor_index %d",
74044961713Sgirish 			pkt_buf_addr_pp,
74144961713Sgirish 			pktbufsz_type,
74244961713Sgirish 			anchor_index));
74344961713Sgirish 	}
74444961713Sgirish 
74544961713Sgirish 	if (found == B_FALSE) {
74644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
74744961713Sgirish 			"==> nxge_rxbuf_pp_to_vp: (search failed)"
74844961713Sgirish 			"buf_pp $%p btype %d anchor_index %d",
74944961713Sgirish 			pkt_buf_addr_pp,
75044961713Sgirish 			pktbufsz_type,
75144961713Sgirish 			anchor_index));
75244961713Sgirish 		return (NXGE_ERROR);
75344961713Sgirish 	}
75444961713Sgirish 
75544961713Sgirish found_index:
75644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
75744961713Sgirish 		"==> nxge_rxbuf_pp_to_vp: (FOUND1)"
75844961713Sgirish 		"buf_pp $%p btype %d bufsize %d anchor_index %d",
75944961713Sgirish 		pkt_buf_addr_pp,
76044961713Sgirish 		pktbufsz_type,
76144961713Sgirish 		bufsize,
76244961713Sgirish 		anchor_index));
76344961713Sgirish 
76444961713Sgirish 	/* index of the first block in this chunk */
76544961713Sgirish 	chunk_index = bufinfo[anchor_index].start_index;
76644961713Sgirish 	dvma_addr =  bufinfo[anchor_index].dvma_addr;
76744961713Sgirish 	page_size_mask = ring_info->block_size_mask;
76844961713Sgirish 
76944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
77044961713Sgirish 		"==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
77144961713Sgirish 		"buf_pp $%p btype %d bufsize %d "
77244961713Sgirish 		"anchor_index %d chunk_index %d dvma $%p",
77344961713Sgirish 		pkt_buf_addr_pp,
77444961713Sgirish 		pktbufsz_type,
77544961713Sgirish 		bufsize,
77644961713Sgirish 		anchor_index,
77744961713Sgirish 		chunk_index,
77844961713Sgirish 		dvma_addr));
77944961713Sgirish 
78044961713Sgirish 	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
78144961713Sgirish 	block_size = rbr_p->block_size; /* System  block(page) size */
78244961713Sgirish 
78344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
78444961713Sgirish 		"==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
78544961713Sgirish 		"buf_pp $%p btype %d bufsize %d "
78644961713Sgirish 		"anchor_index %d chunk_index %d dvma $%p "
78744961713Sgirish 		"offset %d block_size %d",
78844961713Sgirish 		pkt_buf_addr_pp,
78944961713Sgirish 		pktbufsz_type,
79044961713Sgirish 		bufsize,
79144961713Sgirish 		anchor_index,
79244961713Sgirish 		chunk_index,
79344961713Sgirish 		dvma_addr,
79444961713Sgirish 		offset,
79544961713Sgirish 		block_size));
79644961713Sgirish 
79744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
79844961713Sgirish 
79944961713Sgirish 	block_index = (offset / block_size); /* index within chunk */
80044961713Sgirish 	total_index = chunk_index + block_index;
80144961713Sgirish 
80244961713Sgirish 
80344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
80444961713Sgirish 		"==> nxge_rxbuf_pp_to_vp: "
80544961713Sgirish 		"total_index %d dvma_addr $%p "
80644961713Sgirish 		"offset %d block_size %d "
80744961713Sgirish 		"block_index %d ",
80844961713Sgirish 		total_index, dvma_addr,
80944961713Sgirish 		offset, block_size,
81044961713Sgirish 		block_index));
811adfcba55Sjoycey #if defined(__i386)
812adfcba55Sjoycey 	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
813adfcba55Sjoycey 		(uint32_t)offset);
814adfcba55Sjoycey #else
815adfcba55Sjoycey 	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
816adfcba55Sjoycey 		(uint64_t)offset);
817adfcba55Sjoycey #endif
81844961713Sgirish 
81944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
82044961713Sgirish 		"==> nxge_rxbuf_pp_to_vp: "
82144961713Sgirish 		"total_index %d dvma_addr $%p "
82244961713Sgirish 		"offset %d block_size %d "
82344961713Sgirish 		"block_index %d "
82444961713Sgirish 		"*pkt_buf_addr_p $%p",
82544961713Sgirish 		total_index, dvma_addr,
82644961713Sgirish 		offset, block_size,
82744961713Sgirish 		block_index,
82844961713Sgirish 		*pkt_buf_addr_p));
82944961713Sgirish 
83044961713Sgirish 
83144961713Sgirish 	*msg_index = total_index;
83244961713Sgirish 	*bufoffset =  (offset & page_size_mask);
83344961713Sgirish 
83444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
83544961713Sgirish 		"==> nxge_rxbuf_pp_to_vp: get msg index: "
83644961713Sgirish 		"msg_index %d bufoffset_index %d",
83744961713Sgirish 		*msg_index,
83844961713Sgirish 		*bufoffset));
83944961713Sgirish 
84044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
84144961713Sgirish 
84244961713Sgirish 	return (NXGE_OK);
84344961713Sgirish }
84444961713Sgirish 
84544961713Sgirish /*
84644961713Sgirish  * Comparison function used by nxge_ksort() to order
84744961713Sgirish  * rxbuf_index_info_t entries by DVMA address.
84844961713Sgirish  */
84944961713Sgirish static int
85044961713Sgirish nxge_sort_compare(const void *p1, const void *p2)
85144961713Sgirish {
85244961713Sgirish 
85344961713Sgirish 	rxbuf_index_info_t *a, *b;
85444961713Sgirish 
85544961713Sgirish 	a = (rxbuf_index_info_t *)p1;
85644961713Sgirish 	b = (rxbuf_index_info_t *)p2;
85744961713Sgirish 
85844961713Sgirish 	if (a->dvma_addr > b->dvma_addr)
85944961713Sgirish 		return (1);
86044961713Sgirish 	if (a->dvma_addr < b->dvma_addr)
86144961713Sgirish 		return (-1);
86244961713Sgirish 	return (0);
86344961713Sgirish }
86444961713Sgirish 
86544961713Sgirish 
86644961713Sgirish 
86744961713Sgirish /*
86844961713Sgirish  * grabbed this sort implementation from common/syscall/avl.c
86944961713Sgirish  *
87044961713Sgirish  */
87144961713Sgirish /*
87244961713Sgirish  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
87344961713Sgirish  * v = Ptr to array/vector of objs
87444961713Sgirish  * n = # objs in the array
87544961713Sgirish  * s = size of each obj (must be a multiple of the word size)
87644961713Sgirish  * f = ptr to function to compare two objs
87744961713Sgirish  *	returns -1 (less than), 0 (equal), or 1 (greater than)
87844961713Sgirish  */
87944961713Sgirish void
88044961713Sgirish nxge_ksort(caddr_t v, int n, int s, int (*f)())
88144961713Sgirish {
88244961713Sgirish 	int g, i, j, ii;
88344961713Sgirish 	unsigned int *p1, *p2;
88444961713Sgirish 	unsigned int tmp;
88544961713Sgirish 
88644961713Sgirish 	/* No work to do */
88744961713Sgirish 	if (v == NULL || n <= 1)
88844961713Sgirish 		return;
88944961713Sgirish 	/* Sanity check on arguments */
89044961713Sgirish 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
89144961713Sgirish 	ASSERT(s > 0);
89244961713Sgirish 
89344961713Sgirish 	for (g = n / 2; g > 0; g /= 2) {
89444961713Sgirish 		for (i = g; i < n; i++) {
89544961713Sgirish 			for (j = i - g; j >= 0 &&
89644961713Sgirish 				(*f)(v + j * s, v + (j + g) * s) == 1;
89744961713Sgirish 					j -= g) {
89844961713Sgirish 				p1 = (unsigned *)(v + j * s);
89944961713Sgirish 				p2 = (unsigned *)(v + (j + g) * s);
90044961713Sgirish 				for (ii = 0; ii < s / 4; ii++) {
90144961713Sgirish 					tmp = *p1;
90244961713Sgirish 					*p1++ = *p2;
90344961713Sgirish 					*p2++ = tmp;
90444961713Sgirish 				}
90544961713Sgirish 			}
90644961713Sgirish 		}
90744961713Sgirish 	}
90844961713Sgirish }
90944961713Sgirish 
91044961713Sgirish /*
91144961713Sgirish  * Initialize data structures required for rxdma
91244961713Sgirish  * buffer dvma->vmem address lookup
91344961713Sgirish  */
91444961713Sgirish /*ARGSUSED*/
91544961713Sgirish static nxge_status_t
91644961713Sgirish nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
91744961713Sgirish {
91844961713Sgirish 
91944961713Sgirish 	int index;
92044961713Sgirish 	rxring_info_t *ring_info;
92144961713Sgirish 	int max_iteration = 0, max_index = 0;
92244961713Sgirish 
92344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
92444961713Sgirish 
92544961713Sgirish 	ring_info = rbrp->ring_info;
92644961713Sgirish 	ring_info->hint[0] = NO_HINT;
92744961713Sgirish 	ring_info->hint[1] = NO_HINT;
92844961713Sgirish 	ring_info->hint[2] = NO_HINT;
92944961713Sgirish 	max_index = rbrp->num_blocks;
93044961713Sgirish 
93144961713Sgirish 		/* read the DVMA address information and sort it */
93244961713Sgirish 		/* do init of the information array */
93344961713Sgirish 
93444961713Sgirish 
93544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
93644961713Sgirish 		" nxge_rxbuf_index_info_init Sort ptrs"));
93744961713Sgirish 
93844961713Sgirish 		/* sort the array */
93944961713Sgirish 	nxge_ksort((void *)ring_info->buffer, max_index,
94044961713Sgirish 		sizeof (rxbuf_index_info_t), nxge_sort_compare);
94144961713Sgirish 
94244961713Sgirish 
94344961713Sgirish 
94444961713Sgirish 	for (index = 0; index < max_index; index++) {
94544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
94644961713Sgirish 			" nxge_rxbuf_index_info_init: sorted chunk %d "
94744961713Sgirish 			" ioaddr $%p kaddr $%p size %x",
94844961713Sgirish 			index, ring_info->buffer[index].dvma_addr,
94944961713Sgirish 			ring_info->buffer[index].kaddr,
95044961713Sgirish 			ring_info->buffer[index].buf_size));
95144961713Sgirish 	}
95244961713Sgirish 
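	/*
	 * Bound the binary search: find the smallest power of two that
	 * exceeds the number of blocks (i.e. floor(log2(max_index)) + 1
	 * iterations) and allow one extra iteration as a safety margin.
	 */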
95344961713Sgirish 	max_iteration = 0;
95444961713Sgirish 	while (max_index >= (1ULL << max_iteration))
95544961713Sgirish 		max_iteration++;
95644961713Sgirish 	ring_info->max_iterations = max_iteration + 1;
95744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
95844961713Sgirish 		" nxge_rxbuf_index_info_init Find max iter %d",
95944961713Sgirish 					ring_info->max_iterations));
96044961713Sgirish 
96144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
96244961713Sgirish 	return (NXGE_OK);
96344961713Sgirish }
96444961713Sgirish 
9650a8e077aSspeer /* ARGSUSED */
96644961713Sgirish void
96744961713Sgirish nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
96844961713Sgirish {
96944961713Sgirish #ifdef	NXGE_DEBUG
97044961713Sgirish 
97144961713Sgirish 	uint32_t bptr;
97244961713Sgirish 	uint64_t pp;
97344961713Sgirish 
97444961713Sgirish 	bptr = entry_p->bits.hdw.pkt_buf_addr;
97544961713Sgirish 
97644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
97744961713Sgirish 		"\trcr entry $%p "
97844961713Sgirish 		"\trcr entry 0x%0llx "
97944961713Sgirish 		"\trcr entry 0x%08x "
98044961713Sgirish 		"\trcr entry 0x%08x "
98144961713Sgirish 		"\tvalue 0x%0llx\n"
98244961713Sgirish 		"\tmulti = %d\n"
98344961713Sgirish 		"\tpkt_type = 0x%x\n"
98444961713Sgirish 		"\tzero_copy = %d\n"
98544961713Sgirish 		"\tnoport = %d\n"
98644961713Sgirish 		"\tpromis = %d\n"
98744961713Sgirish 		"\terror = 0x%04x\n"
98844961713Sgirish 		"\tdcf_err = 0x%01x\n"
98944961713Sgirish 		"\tl2_len = %d\n"
99044961713Sgirish 		"\tpktbufsize = %d\n"
99144961713Sgirish 		"\tpkt_buf_addr = $%p\n"
99244961713Sgirish 		"\tpkt_buf_addr (<< 6) = $%p\n",
99344961713Sgirish 		entry_p,
99444961713Sgirish 		*(int64_t *)entry_p,
99544961713Sgirish 		*(int32_t *)entry_p,
99644961713Sgirish 		*(int32_t *)((char *)entry_p + 4),
99744961713Sgirish 		entry_p->value,
99844961713Sgirish 		entry_p->bits.hdw.multi,
99944961713Sgirish 		entry_p->bits.hdw.pkt_type,
100044961713Sgirish 		entry_p->bits.hdw.zero_copy,
100144961713Sgirish 		entry_p->bits.hdw.noport,
100244961713Sgirish 		entry_p->bits.hdw.promis,
100344961713Sgirish 		entry_p->bits.hdw.error,
100444961713Sgirish 		entry_p->bits.hdw.dcf_err,
100544961713Sgirish 		entry_p->bits.hdw.l2_len,
100644961713Sgirish 		entry_p->bits.hdw.pktbufsz,
100744961713Sgirish 		bptr,
100844961713Sgirish 		entry_p->bits.ldw.pkt_buf_addr));
100944961713Sgirish 
101044961713Sgirish 	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
101144961713Sgirish 		RCR_PKT_BUF_ADDR_SHIFT;
101244961713Sgirish 
101344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
101444961713Sgirish 		pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
101544961713Sgirish #endif
101644961713Sgirish }
101744961713Sgirish 
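/*
 * nxge_rxdma_regs_dump
 *
 *	Debug aid: print the RBR head pointer and queue length and the
 *	RCR tail pointer and queue length for one RDC.
 */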
101844961713Sgirish void
101944961713Sgirish nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
102044961713Sgirish {
102144961713Sgirish 	npi_handle_t		handle;
102244961713Sgirish 	rbr_stat_t 		rbr_stat;
102344961713Sgirish 	addr44_t 		hd_addr;
102444961713Sgirish 	addr44_t 		tail_addr;
102544961713Sgirish 	uint16_t 		qlen;
102644961713Sgirish 
102744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
102844961713Sgirish 		"==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
102944961713Sgirish 
103044961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
103144961713Sgirish 
103244961713Sgirish 	/* RBR head */
103344961713Sgirish 	hd_addr.addr = 0;
103444961713Sgirish 	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
1035adfcba55Sjoycey #if defined(__i386)
103653f3d8ecSyc 	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1037adfcba55Sjoycey 		(void *)(uint32_t)hd_addr.addr);
1038adfcba55Sjoycey #else
103953f3d8ecSyc 	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
104044961713Sgirish 		(void *)hd_addr.addr);
1041adfcba55Sjoycey #endif
104244961713Sgirish 
104344961713Sgirish 	/* RBR stats */
104444961713Sgirish 	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
104544961713Sgirish 	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
104644961713Sgirish 
104744961713Sgirish 	/* RCR tail */
104844961713Sgirish 	tail_addr.addr = 0;
104944961713Sgirish 	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
1050adfcba55Sjoycey #if defined(__i386)
105153f3d8ecSyc 	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1052adfcba55Sjoycey 		(void *)(uint32_t)tail_addr.addr);
1053adfcba55Sjoycey #else
105453f3d8ecSyc 	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
105544961713Sgirish 		(void *)tail_addr.addr);
1056adfcba55Sjoycey #endif
105744961713Sgirish 
105844961713Sgirish 	/* RCR qlen */
105944961713Sgirish 	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
106044961713Sgirish 	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
106144961713Sgirish 
106244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
106344961713Sgirish 		"<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
106444961713Sgirish }
106544961713Sgirish 
106644961713Sgirish void
106744961713Sgirish nxge_rxdma_stop(p_nxge_t nxgep)
106844961713Sgirish {
106944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop"));
107044961713Sgirish 
107144961713Sgirish 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
107244961713Sgirish 	(void) nxge_rx_mac_disable(nxgep);
107344961713Sgirish 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
107444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
107544961713Sgirish }
107644961713Sgirish 
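/*
 * nxge_rxdma_stop_reinit
 *
 *	Stop the receive path, tear down and rebuild all RXDMA channels,
 *	then re-enable the receive MAC.  Unless AXIS_DEBUG_LB is defined,
 *	the transceiver is also reinitialized and link monitoring is
 *	restarted.
 */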
107744961713Sgirish void
107844961713Sgirish nxge_rxdma_stop_reinit(p_nxge_t nxgep)
107944961713Sgirish {
108044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));
108144961713Sgirish 
108244961713Sgirish 	(void) nxge_rxdma_stop(nxgep);
108344961713Sgirish 	(void) nxge_uninit_rxdma_channels(nxgep);
108444961713Sgirish 	(void) nxge_init_rxdma_channels(nxgep);
108544961713Sgirish 
108644961713Sgirish #ifndef	AXIS_DEBUG_LB
108744961713Sgirish 	(void) nxge_xcvr_init(nxgep);
108844961713Sgirish 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
108944961713Sgirish #endif
109044961713Sgirish 	(void) nxge_rx_mac_enable(nxgep);
109144961713Sgirish 
109244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
109344961713Sgirish }
109444961713Sgirish 
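/*
 * nxge_rxdma_hw_mode
 *
 *	Enable or disable every configured RDC.  The hardware must have
 *	been initialized and the RBR rings set up; otherwise NXGE_ERROR
 *	is returned.
 */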
109544961713Sgirish nxge_status_t
109644961713Sgirish nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
109744961713Sgirish {
109844961713Sgirish 	int			i, ndmas;
109944961713Sgirish 	uint16_t		channel;
110044961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
110144961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
110244961713Sgirish 	npi_handle_t		handle;
110344961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
110444961713Sgirish 	nxge_status_t		status = NXGE_OK;
110544961713Sgirish 
110644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
110744961713Sgirish 		"==> nxge_rxdma_hw_mode: mode %d", enable));
110844961713Sgirish 
110944961713Sgirish 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
111044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
111144961713Sgirish 			"<== nxge_rxdma_mode: not initialized"));
111244961713Sgirish 		return (NXGE_ERROR);
111344961713Sgirish 	}
111444961713Sgirish 
111544961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
111644961713Sgirish 	if (rx_rbr_rings == NULL) {
111744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
111844961713Sgirish 			"<== nxge_rxdma_mode: NULL ring pointer"));
111944961713Sgirish 		return (NXGE_ERROR);
112044961713Sgirish 	}
112144961713Sgirish 	if (rx_rbr_rings->rbr_rings == NULL) {
112244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
112344961713Sgirish 			"<== nxge_rxdma_mode: NULL rbr rings pointer"));
112444961713Sgirish 		return (NXGE_ERROR);
112544961713Sgirish 	}
112644961713Sgirish 
112744961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
112844961713Sgirish 	if (!ndmas) {
112944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
113044961713Sgirish 			"<== nxge_rxdma_mode: no channel"));
113144961713Sgirish 		return (NXGE_ERROR);
113244961713Sgirish 	}
113344961713Sgirish 
113444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
113544961713Sgirish 		"==> nxge_rxdma_mode (ndmas %d)", ndmas));
113644961713Sgirish 
113744961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
113844961713Sgirish 
113944961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
114044961713Sgirish 	for (i = 0; i < ndmas; i++) {
114144961713Sgirish 		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
114244961713Sgirish 			continue;
114344961713Sgirish 		}
114444961713Sgirish 		channel = rbr_rings[i]->rdc;
114544961713Sgirish 		if (enable) {
114644961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
114744961713Sgirish 				"==> nxge_rxdma_hw_mode: channel %d (enable)",
114844961713Sgirish 				channel));
114944961713Sgirish 			rs = npi_rxdma_cfg_rdc_enable(handle, channel);
115044961713Sgirish 		} else {
115144961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
115244961713Sgirish 				"==> nxge_rxdma_hw_mode: channel %d (disable)",
115344961713Sgirish 				channel));
115444961713Sgirish 			rs = npi_rxdma_cfg_rdc_disable(handle, channel);
115544961713Sgirish 		}
115644961713Sgirish 	}
115744961713Sgirish 
115844961713Sgirish 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
115944961713Sgirish 
116044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
116144961713Sgirish 		"<== nxge_rxdma_hw_mode: status 0x%x", status));
116244961713Sgirish 
116344961713Sgirish 	return (status);
116444961713Sgirish }
116544961713Sgirish 
116644961713Sgirish void
116744961713Sgirish nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
116844961713Sgirish {
116944961713Sgirish 	npi_handle_t		handle;
117044961713Sgirish 
117144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
117244961713Sgirish 		"==> nxge_rxdma_enable_channel: channel %d", channel));
117344961713Sgirish 
117444961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
117544961713Sgirish 	(void) npi_rxdma_cfg_rdc_enable(handle, channel);
117644961713Sgirish 
117744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
117844961713Sgirish }
117944961713Sgirish 
118044961713Sgirish void
118144961713Sgirish nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
118244961713Sgirish {
118344961713Sgirish 	npi_handle_t		handle;
118444961713Sgirish 
118544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
118644961713Sgirish 		"==> nxge_rxdma_disable_channel: channel %d", channel));
118744961713Sgirish 
118844961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
118944961713Sgirish 	(void) npi_rxdma_cfg_rdc_disable(handle, channel);
119044961713Sgirish 
119144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
119244961713Sgirish }
119344961713Sgirish 
119444961713Sgirish void
119544961713Sgirish nxge_hw_start_rx(p_nxge_t nxgep)
119644961713Sgirish {
119744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
119844961713Sgirish 
119944961713Sgirish 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
120044961713Sgirish 	(void) nxge_rx_mac_enable(nxgep);
120144961713Sgirish 
120244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
120344961713Sgirish }
120444961713Sgirish 
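/*
 * nxge_fixup_rxdma_rings
 *
 *	Stop the RXDMA hardware and restart every configured channel in
 *	place, reusing the rings and mailboxes that were mapped at init
 *	time.
 */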
120544961713Sgirish /*ARGSUSED*/
120644961713Sgirish void
120744961713Sgirish nxge_fixup_rxdma_rings(p_nxge_t nxgep)
120844961713Sgirish {
120944961713Sgirish 	int			i, ndmas;
121044961713Sgirish 	uint16_t		rdc;
121144961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
121244961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
121344961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
121444961713Sgirish 
121544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
121644961713Sgirish 
121744961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
121844961713Sgirish 	if (rx_rbr_rings == NULL) {
121944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
122044961713Sgirish 			"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
122144961713Sgirish 		return;
122244961713Sgirish 	}
122344961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
122444961713Sgirish 	if (!ndmas) {
122544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
122644961713Sgirish 			"<== nxge_fixup_rxdma_rings: no channel"));
122744961713Sgirish 		return;
122844961713Sgirish 	}
122944961713Sgirish 
123044961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
123144961713Sgirish 	if (rx_rcr_rings == NULL) {
123244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
123344961713Sgirish 			"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
123444961713Sgirish 		return;
123544961713Sgirish 	}
123644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
123744961713Sgirish 		"==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas));
123844961713Sgirish 
123944961713Sgirish 	nxge_rxdma_hw_stop(nxgep);
124044961713Sgirish 
124144961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
124244961713Sgirish 	for (i = 0; i < ndmas; i++) {
124344961713Sgirish 		rdc = rbr_rings[i]->rdc;
124444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
124544961713Sgirish 			"==> nxge_fixup_rxdma_rings: channel %d "
124644961713Sgirish 			"ring $%p", rdc, rbr_rings[i]));
124744961713Sgirish 		(void) nxge_rxdma_fixup_channel(nxgep, rdc, i);
124844961713Sgirish 	}
124944961713Sgirish 
125044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
125144961713Sgirish }
125244961713Sgirish 
125344961713Sgirish void
125444961713Sgirish nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
125544961713Sgirish {
125644961713Sgirish 	int		i;
125744961713Sgirish 
125844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
125944961713Sgirish 	i = nxge_rxdma_get_ring_index(nxgep, channel);
126044961713Sgirish 	if (i < 0) {
126144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
126244961713Sgirish 			"<== nxge_rxdma_fix_channel: no entry found"));
126344961713Sgirish 		return;
126444961713Sgirish 	}
126544961713Sgirish 
126644961713Sgirish 	nxge_rxdma_fixup_channel(nxgep, channel, i);
126744961713Sgirish 
126844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
126944961713Sgirish }
127044961713Sgirish 
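/*
 * nxge_rxdma_fixup_channel
 *
 *	Stop one channel, reset the software read/write indices of its
 *	RBR and RCR, zero the completion (RCR) descriptor area and then
 *	restart the channel with the existing rings and mailbox.
 */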
127144961713Sgirish void
127244961713Sgirish nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
127344961713Sgirish {
127444961713Sgirish 	int			ndmas;
127544961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
127644961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
127744961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
127844961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
127944961713Sgirish 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
128044961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
128144961713Sgirish 	p_nxge_dma_pool_t	dma_buf_poolp;
128244961713Sgirish 	p_nxge_dma_pool_t	dma_cntl_poolp;
128344961713Sgirish 	p_rx_rbr_ring_t 	rbrp;
128444961713Sgirish 	p_rx_rcr_ring_t 	rcrp;
128544961713Sgirish 	p_rx_mbox_t 		mboxp;
128644961713Sgirish 	p_nxge_dma_common_t 	dmap;
128744961713Sgirish 	nxge_status_t		status = NXGE_OK;
128844961713Sgirish 
128944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));
129044961713Sgirish 
129144961713Sgirish 	(void) nxge_rxdma_stop_channel(nxgep, channel);
129244961713Sgirish 
129344961713Sgirish 	dma_buf_poolp = nxgep->rx_buf_pool_p;
129444961713Sgirish 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
129544961713Sgirish 
129644961713Sgirish 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
129744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
129844961713Sgirish 			"<== nxge_rxdma_fixup_channel: buf not allocated"));
129944961713Sgirish 		return;
130044961713Sgirish 	}
130144961713Sgirish 
130244961713Sgirish 	ndmas = dma_buf_poolp->ndmas;
130344961713Sgirish 	if (!ndmas) {
130444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
130544961713Sgirish 			"<== nxge_rxdma_fixup_channel: no dma allocated"));
130644961713Sgirish 		return;
130744961713Sgirish 	}
130844961713Sgirish 
1309a3c5bd6dSspeer 	rx_rbr_rings = nxgep->rx_rbr_rings;
131044961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
131144961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
131244961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
131344961713Sgirish 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
131444961713Sgirish 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
131544961713Sgirish 
131644961713Sgirish 	/* Reinitialize the receive block and completion rings */
131744961713Sgirish 	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
131844961713Sgirish 	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
131944961713Sgirish 	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];
132044961713Sgirish 
132144961713Sgirish 
132244961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
132344961713Sgirish 	rbrp->rbr_rd_index = 0;
132444961713Sgirish 	rcrp->comp_rd_index = 0;
132544961713Sgirish 	rcrp->comp_wt_index = 0;
132644961713Sgirish 
132744961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
132844961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
132944961713Sgirish 
133044961713Sgirish 	status = nxge_rxdma_start_channel(nxgep, channel,
133144961713Sgirish 			rbrp, rcrp, mboxp);
133244961713Sgirish 	if (status != NXGE_OK) {
133344961713Sgirish 		goto nxge_rxdma_fixup_channel_fail;
133444961713Sgirish 	}
133844961713Sgirish 
133944961713Sgirish nxge_rxdma_fixup_channel_fail:
134044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
134144961713Sgirish 		"==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
134244961713Sgirish 
134344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
134444961713Sgirish }
134544961713Sgirish 
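/*
 * nxge_rxdma_get_ring_index():
 *	Map a hardware RDC channel number to its index within the
 *	software ring arrays (rbr_rings[]/rcr_rings[]); returns -1 if
 *	the rings are not mapped or the channel is not found.
 *
 *	Illustrative usage (this mirrors nxge_rxdma_fix_channel()):
 *
 *		i = nxge_rxdma_get_ring_index(nxgep, channel);
 *		if (i >= 0)
 *			nxge_rxdma_fixup_channel(nxgep, channel, i);
 */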
134644961713Sgirish int
134744961713Sgirish nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
134844961713Sgirish {
134944961713Sgirish 	int			i, ndmas;
135044961713Sgirish 	uint16_t		rdc;
135144961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
135244961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
135344961713Sgirish 
135444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
135544961713Sgirish 		"==> nxge_rxdma_get_ring_index: channel %d", channel));
135644961713Sgirish 
135744961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
135844961713Sgirish 	if (rx_rbr_rings == NULL) {
135944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
136044961713Sgirish 			"<== nxge_rxdma_get_ring_index: NULL ring pointer"));
136144961713Sgirish 		return (-1);
136244961713Sgirish 	}
136344961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
136444961713Sgirish 	if (!ndmas) {
136544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
136644961713Sgirish 			"<== nxge_rxdma_get_ring_index: no channel"));
136744961713Sgirish 		return (-1);
136844961713Sgirish 	}
136944961713Sgirish 
137044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
137144961713Sgirish 		"==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));
137244961713Sgirish 
137344961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
137444961713Sgirish 	for (i = 0; i < ndmas; i++) {
137544961713Sgirish 		rdc = rbr_rings[i]->rdc;
137644961713Sgirish 		if (channel == rdc) {
137744961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
137844961713Sgirish 				"==> nxge_rxdma_get_ring_index: "
137944961713Sgirish 				"channel %d (index %d) "
138044961713Sgirish 				"ring $%p", channel, i,
138144961713Sgirish 				rbr_rings[i]));
138244961713Sgirish 			return (i);
138344961713Sgirish 		}
138444961713Sgirish 	}
138544961713Sgirish 
138644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
138744961713Sgirish 		"<== nxge_rxdma_get_ring_index: not found"));
138844961713Sgirish 
138944961713Sgirish 	return (-1);
139044961713Sgirish }
139144961713Sgirish 
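/*
 * nxge_rxdma_get_rbr_ring():
 *	Return the receive block ring (RBR) whose rdc field matches
 *	the given channel, or NULL if the rings are not mapped or no
 *	match is found.
 */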
139244961713Sgirish p_rx_rbr_ring_t
139344961713Sgirish nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
139444961713Sgirish {
139544961713Sgirish 	int			i, ndmas;
139644961713Sgirish 	uint16_t		rdc;
139744961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
139844961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
139944961713Sgirish 
140044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
140144961713Sgirish 		"==> nxge_rxdma_get_rbr_ring: channel %d", channel));
140244961713Sgirish 
140344961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
140444961713Sgirish 	if (rx_rbr_rings == NULL) {
140544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
140644961713Sgirish 			"<== nxge_rxdma_get_rbr_ring: NULL ring pointer"));
140744961713Sgirish 		return (NULL);
140844961713Sgirish 	}
140944961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
141044961713Sgirish 	if (!ndmas) {
141144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
141244961713Sgirish 			"<== nxge_rxdma_get_rbr_ring: no channel"));
141344961713Sgirish 		return (NULL);
141444961713Sgirish 	}
141544961713Sgirish 
141644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
141744961713Sgirish 		"==> nxge_rxdma_get_rbr_ring (ndmas %d)", ndmas));
141844961713Sgirish 
141944961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
142044961713Sgirish 	for (i = 0; i < ndmas; i++) {
142144961713Sgirish 		rdc = rbr_rings[i]->rdc;
142244961713Sgirish 		if (channel == rdc) {
142344961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
142444961713Sgirish 				"==> nxge_rxdma_get_rbr_ring: channel %d "
142544961713Sgirish 				"ring $%p", channel, rbr_rings[i]));
142644961713Sgirish 			return (rbr_rings[i]);
142744961713Sgirish 		}
142844961713Sgirish 	}
142944961713Sgirish 
143044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
143144961713Sgirish 		"<== nxge_rxdma_get_rbr_ring: not found"));
143244961713Sgirish 
143344961713Sgirish 	return (NULL);
143444961713Sgirish }
143544961713Sgirish 
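/*
 * nxge_rxdma_get_rcr_ring():
 *	Same lookup as above, but for the receive completion ring
 *	(RCR) belonging to the given channel.
 */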
143644961713Sgirish p_rx_rcr_ring_t
143744961713Sgirish nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
143844961713Sgirish {
143944961713Sgirish 	int			i, ndmas;
144044961713Sgirish 	uint16_t		rdc;
144144961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
144244961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
144344961713Sgirish 
144444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
144544961713Sgirish 		"==> nxge_rxdma_get_rcr_ring: channel %d", channel));
144644961713Sgirish 
144744961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
144844961713Sgirish 	if (rx_rcr_rings == NULL) {
144944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
145044961713Sgirish 			"<== nxge_rxdma_get_rcr_ring: NULL ring pointer"));
145144961713Sgirish 		return (NULL);
145244961713Sgirish 	}
145344961713Sgirish 	ndmas = rx_rcr_rings->ndmas;
145444961713Sgirish 	if (!ndmas) {
145544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
145644961713Sgirish 			"<== nxge_rxdma_get_rcr_ring: no channel"));
145744961713Sgirish 		return (NULL);
145844961713Sgirish 	}
145944961713Sgirish 
146044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
146144961713Sgirish 		"==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas));
146244961713Sgirish 
146344961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
146444961713Sgirish 	for (i = 0; i < ndmas; i++) {
146544961713Sgirish 		rdc = rcr_rings[i]->rdc;
146644961713Sgirish 		if (channel == rdc) {
146744961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
146844961713Sgirish 				"==> nxge_rxdma_get_rcr_ring: channel %d "
146944961713Sgirish 				"ring $%p", channel, rcr_rings[i]));
147044961713Sgirish 			return (rcr_rings[i]);
147144961713Sgirish 		}
147244961713Sgirish 	}
147344961713Sgirish 
147444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
147544961713Sgirish 		"<== nxge_rxdma_get_rcr_ring: not found"));
147644961713Sgirish 
147744961713Sgirish 	return (NULL);
147844961713Sgirish }
147944961713Sgirish 
148044961713Sgirish /*
148144961713Sgirish  * Static functions start here.
148244961713Sgirish  */
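/*
 * nxge_allocb():
 *	Allocate and initialize one receive message block (rx_msg_t).
 *	If a DMA buffer descriptor is supplied, the block is carved
 *	off the front of that buffer: the descriptor is copied and the
 *	original's kernel address, I/O address, length, offset and DMA
 *	cookie are advanced by `size'. Otherwise the buffer comes from
 *	kmem. In either case the buffer is wrapped with desballoc()
 *	and nxge_freeb() is registered as the free function, so the
 *	driver regains control of the buffer when the mblk is freed.
 */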
148344961713Sgirish static p_rx_msg_t
148444961713Sgirish nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
148544961713Sgirish {
148644961713Sgirish 	p_rx_msg_t nxge_mp 		= NULL;
148744961713Sgirish 	p_nxge_dma_common_t		dmamsg_p;
148844961713Sgirish 	uchar_t 			*buffer;
148944961713Sgirish 
149044961713Sgirish 	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
149144961713Sgirish 	if (nxge_mp == NULL) {
149256d930aeSspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
149344961713Sgirish 			"Allocation of a rx msg failed."));
149444961713Sgirish 		goto nxge_allocb_exit;
149544961713Sgirish 	}
149644961713Sgirish 
149744961713Sgirish 	nxge_mp->use_buf_pool = B_FALSE;
149844961713Sgirish 	if (dmabuf_p) {
149944961713Sgirish 		nxge_mp->use_buf_pool = B_TRUE;
150044961713Sgirish 		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
150144961713Sgirish 		*dmamsg_p = *dmabuf_p;
150244961713Sgirish 		dmamsg_p->nblocks = 1;
150344961713Sgirish 		dmamsg_p->block_size = size;
150444961713Sgirish 		dmamsg_p->alength = size;
150544961713Sgirish 		buffer = (uchar_t *)dmabuf_p->kaddrp;
150644961713Sgirish 
150744961713Sgirish 		dmabuf_p->kaddrp = (void *)
150844961713Sgirish 				((char *)dmabuf_p->kaddrp + size);
150944961713Sgirish 		dmabuf_p->ioaddr_pp = (void *)
151044961713Sgirish 				((char *)dmabuf_p->ioaddr_pp + size);
151144961713Sgirish 		dmabuf_p->alength -= size;
151244961713Sgirish 		dmabuf_p->offset += size;
151344961713Sgirish 		dmabuf_p->dma_cookie.dmac_laddress += size;
151444961713Sgirish 		dmabuf_p->dma_cookie.dmac_size -= size;
151544961713Sgirish 
151644961713Sgirish 	} else {
151744961713Sgirish 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
151844961713Sgirish 		if (buffer == NULL) {
151956d930aeSspeer 			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
152044961713Sgirish 				"Allocation of a receive page failed."));
152144961713Sgirish 			goto nxge_allocb_fail1;
152244961713Sgirish 		}
152344961713Sgirish 	}
152444961713Sgirish 
152544961713Sgirish 	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
152644961713Sgirish 	if (nxge_mp->rx_mblk_p == NULL) {
152756d930aeSspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
152844961713Sgirish 		goto nxge_allocb_fail2;
152944961713Sgirish 	}
153044961713Sgirish 
153144961713Sgirish 	nxge_mp->buffer = buffer;
153244961713Sgirish 	nxge_mp->block_size = size;
153344961713Sgirish 	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
153444961713Sgirish 	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
153544961713Sgirish 	nxge_mp->ref_cnt = 1;
153644961713Sgirish 	nxge_mp->free = B_TRUE;
153744961713Sgirish 	nxge_mp->rx_use_bcopy = B_FALSE;
153844961713Sgirish 
153914ea4bb7Ssd 	atomic_inc_32(&nxge_mblks_pending);
154044961713Sgirish 
154144961713Sgirish 	goto nxge_allocb_exit;
154244961713Sgirish 
154344961713Sgirish nxge_allocb_fail2:
154444961713Sgirish 	if (!nxge_mp->use_buf_pool) {
154544961713Sgirish 		KMEM_FREE(buffer, size);
154644961713Sgirish 	}
154744961713Sgirish 
154844961713Sgirish nxge_allocb_fail1:
154944961713Sgirish 	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
155044961713Sgirish 	nxge_mp = NULL;
155144961713Sgirish 
155244961713Sgirish nxge_allocb_exit:
155344961713Sgirish 	return (nxge_mp);
155444961713Sgirish }
155544961713Sgirish 
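/*
 * nxge_dupb():
 *	Zero-copy ("loan-up") path: desballoc() an mblk that points
 *	directly into the receive buffer at `offset' and bump the
 *	message's reference count so the buffer is not recycled until
 *	the upper layer frees the mblk (which calls nxge_freeb()).
 *	For example, nxge_receive_packet() below uses:
 *
 *		nmp = nxge_dupb(rx_msg_p, buf_offset, bsize);
 */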
155644961713Sgirish p_mblk_t
155744961713Sgirish nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
155844961713Sgirish {
155944961713Sgirish 	p_mblk_t mp;
156044961713Sgirish 
156144961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
156244961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
156344961713Sgirish 		"offset = 0x%08X "
156444961713Sgirish 		"size = 0x%08X",
156544961713Sgirish 		nxge_mp, offset, size));
156644961713Sgirish 
156744961713Sgirish 	mp = desballoc(&nxge_mp->buffer[offset], size,
156844961713Sgirish 				0, &nxge_mp->freeb);
156944961713Sgirish 	if (mp == NULL) {
157044961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
157144961713Sgirish 		goto nxge_dupb_exit;
157244961713Sgirish 	}
157344961713Sgirish 	atomic_inc_32(&nxge_mp->ref_cnt);
157414ea4bb7Ssd 	atomic_inc_32(&nxge_mblks_pending);
157544961713Sgirish 
157644961713Sgirish 
157744961713Sgirish nxge_dupb_exit:
157844961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
157944961713Sgirish 		nxge_mp));
158044961713Sgirish 	return (mp);
158144961713Sgirish }
158244961713Sgirish 
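/*
 * nxge_dupb_bcopy():
 *	Copy path: allocb() a fresh mblk (with NXGE_RXBUF_EXTRA bytes
 *	of headroom) and bcopy `size' bytes out of the receive buffer,
 *	leaving the hardware buffer free for immediate reuse. Chosen
 *	by the rbr_use_bcopy/threshold tunables for small packets.
 */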
158344961713Sgirish p_mblk_t
158444961713Sgirish nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
158544961713Sgirish {
158644961713Sgirish 	p_mblk_t mp;
158744961713Sgirish 	uchar_t *dp;
158844961713Sgirish 
158944961713Sgirish 	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
159044961713Sgirish 	if (mp == NULL) {
159144961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
159244961713Sgirish 		goto nxge_dupb_bcopy_exit;
159344961713Sgirish 	}
159444961713Sgirish 	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
159544961713Sgirish 	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
159644961713Sgirish 	mp->b_wptr = dp + size;
159744961713Sgirish 
159844961713Sgirish nxge_dupb_bcopy_exit:
159944961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
160044961713Sgirish 		nxge_mp));
160144961713Sgirish 	return (mp);
160244961713Sgirish }
160344961713Sgirish 
160444961713Sgirish void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
160544961713Sgirish 	p_rx_msg_t rx_msg_p);
160644961713Sgirish 
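/*
 * nxge_post_page():
 *	Return a receive buffer block to the hardware: clear its usage
 *	state, release a bcopy credit if the ring uses bcopy, advance
 *	the RBR write index under the post lock, store the buffer's
 *	shifted DMA address in the descriptor ring, and kick the RBR
 *	by one so the hardware can reuse the buffer.
 */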
160744961713Sgirish void
160844961713Sgirish nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
160944961713Sgirish {
161044961713Sgirish 
161144961713Sgirish 	npi_handle_t		handle;
161244961713Sgirish 
161344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
161444961713Sgirish 
161544961713Sgirish 	/* Reuse this buffer */
161644961713Sgirish 	rx_msg_p->free = B_FALSE;
161744961713Sgirish 	rx_msg_p->cur_usage_cnt = 0;
161844961713Sgirish 	rx_msg_p->max_usage_cnt = 0;
161944961713Sgirish 	rx_msg_p->pkt_buf_size = 0;
162044961713Sgirish 
162144961713Sgirish 	if (rx_rbr_p->rbr_use_bcopy) {
162244961713Sgirish 		rx_msg_p->rx_use_bcopy = B_FALSE;
162344961713Sgirish 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
162444961713Sgirish 	}
162544961713Sgirish 
162644961713Sgirish 	/*
162744961713Sgirish 	 * Post the buffer's shifted address at the next write index.
162844961713Sgirish 	 */
162944961713Sgirish 	MUTEX_ENTER(&rx_rbr_p->post_lock);
163044961713Sgirish 
163144961713Sgirish 
163244961713Sgirish 	rx_rbr_p->rbr_wr_index =  ((rx_rbr_p->rbr_wr_index + 1) &
163344961713Sgirish 					    rx_rbr_p->rbr_wrap_mask);
163444961713Sgirish 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
163544961713Sgirish 	MUTEX_EXIT(&rx_rbr_p->post_lock);
163644961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
163744961713Sgirish 	npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1);
163844961713Sgirish 
163944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
164044961713Sgirish 		"<== nxge_post_page (channel %d post_next_index %d)",
164144961713Sgirish 		rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
164244961713Sgirish 
164344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
164444961713Sgirish }
164544961713Sgirish 
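/*
 * nxge_freeb():
 *	Free routine registered with desballoc() in nxge_allocb() and
 *	nxge_dupb(). It snapshots the buffer's free state, atomically
 *	decrements the reference count, and then either destroys the
 *	buffer (freeing the kmem buffer when it did not come from the
 *	DMA pool, and the ring itself once its last buffer is gone and
 *	the ring is RBR_UNMAPPED), or, when the last loaned reference
 *	has been returned and the buffer was marked free, reposts the
 *	page to the RBR while the ring is still RBR_POSTING.
 */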
164644961713Sgirish void
164744961713Sgirish nxge_freeb(p_rx_msg_t rx_msg_p)
164844961713Sgirish {
164944961713Sgirish 	size_t size;
165044961713Sgirish 	uchar_t *buffer = NULL;
165144961713Sgirish 	int ref_cnt;
1652958cea9eSml 	boolean_t free_state = B_FALSE;
165344961713Sgirish 
1654007969e0Stm 	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
1655007969e0Stm 
165644961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
165744961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
165844961713Sgirish 		"nxge_freeb:rx_msg_p = $%p (block pending %d)",
165944961713Sgirish 		rx_msg_p, nxge_mblks_pending));
166044961713Sgirish 
166114ea4bb7Ssd 	atomic_dec_32(&nxge_mblks_pending);
1662958cea9eSml 	/*
1663958cea9eSml 	 * First we need to get the free state, then
1664958cea9eSml 	 * atomically decrement the reference count to prevent
1665958cea9eSml 	 * a race condition with the interrupt thread that
1666958cea9eSml 	 * is processing a loaned-up buffer block.
1667958cea9eSml 	 */
1668958cea9eSml 	free_state = rx_msg_p->free;
1669958cea9eSml 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
167044961713Sgirish 	if (!ref_cnt) {
167144961713Sgirish 		buffer = rx_msg_p->buffer;
167244961713Sgirish 		size = rx_msg_p->block_size;
167344961713Sgirish 		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
167444961713Sgirish 			"will free: rx_msg_p = $%p (block pending %d)",
167556d930aeSspeer 			rx_msg_p, nxge_mblks_pending));
167644961713Sgirish 
167744961713Sgirish 		if (!rx_msg_p->use_buf_pool) {
167844961713Sgirish 			KMEM_FREE(buffer, size);
167944961713Sgirish 		}
168014ea4bb7Ssd 
168114ea4bb7Ssd 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1682007969e0Stm 
1683*3e82a89eSmisaki 		if (ring) {
1684*3e82a89eSmisaki 			/*
1685*3e82a89eSmisaki 			 * Decrement the receive buffer ring's reference
1686*3e82a89eSmisaki 			 * count, too.
1687*3e82a89eSmisaki 			 */
1688*3e82a89eSmisaki 			atomic_dec_32(&ring->rbr_ref_cnt);
1689007969e0Stm 
1690*3e82a89eSmisaki 			/*
1691*3e82a89eSmisaki 			 * Free the receive buffer ring, iff
1692*3e82a89eSmisaki 			 * 1. all the receive buffers have been freed
1693*3e82a89eSmisaki 			 * 2. and we are in the proper state (that is,
1694*3e82a89eSmisaki 			 *    we are not UNMAPPING).
1695*3e82a89eSmisaki 			 */
1696*3e82a89eSmisaki 			if (ring->rbr_ref_cnt == 0 &&
1697*3e82a89eSmisaki 			    ring->rbr_state == RBR_UNMAPPED) {
1698*3e82a89eSmisaki 				KMEM_FREE(ring, sizeof (*ring));
1699*3e82a89eSmisaki 			}
1700007969e0Stm 		}
170114ea4bb7Ssd 		return;
170244961713Sgirish 	}
170344961713Sgirish 
170444961713Sgirish 	/*
170544961713Sgirish 	 * Repost buffer.
170644961713Sgirish 	 */
1707*3e82a89eSmisaki 	if (free_state && (ref_cnt == 1) && ring) {
170844961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL,
170944961713Sgirish 		    "nxge_freeb: post page $%p:", rx_msg_p));
1710007969e0Stm 		if (ring->rbr_state == RBR_POSTING)
1711007969e0Stm 			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
171244961713Sgirish 	}
171344961713Sgirish 
171444961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
171544961713Sgirish }
171644961713Sgirish 
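/*
 * nxge_rx_intr():
 *	Per-RDC interrupt handler. It reads the channel's control/
 *	status register, processes the completion ring through
 *	nxge_rx_pkts_vring(), hands error events to
 *	nxge_rx_err_evnts(), acknowledges the write-1-to-clear bits
 *	with the mailbox-update bit set, and re-arms the logical
 *	device group when the group contains a single device.
 */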
171744961713Sgirish uint_t
171844961713Sgirish nxge_rx_intr(void *arg1, void *arg2)
171944961713Sgirish {
172044961713Sgirish 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
172144961713Sgirish 	p_nxge_t		nxgep = (p_nxge_t)arg2;
172244961713Sgirish 	p_nxge_ldg_t		ldgp;
172344961713Sgirish 	uint8_t			channel;
172444961713Sgirish 	npi_handle_t		handle;
172544961713Sgirish 	rx_dma_ctl_stat_t	cs;
172644961713Sgirish 
172744961713Sgirish #ifdef	NXGE_DEBUG
172844961713Sgirish 	rxdma_cfig1_t		cfg;
172944961713Sgirish #endif
173044961713Sgirish 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
173144961713Sgirish 
173244961713Sgirish 	if (ldvp == NULL) {
173344961713Sgirish 		NXGE_DEBUG_MSG((NULL, INT_CTL,
173444961713Sgirish 			"<== nxge_rx_intr: arg2 $%p arg1 $%p",
173544961713Sgirish 			nxgep, ldvp));
173644961713Sgirish 
173744961713Sgirish 		return (DDI_INTR_CLAIMED);
173844961713Sgirish 	}
173944961713Sgirish 
174044961713Sgirish 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
174144961713Sgirish 		nxgep = ldvp->nxgep;
174244961713Sgirish 	}
174344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
174444961713Sgirish 		"==> nxge_rx_intr: arg2 $%p arg1 $%p",
174544961713Sgirish 		nxgep, ldvp));
174644961713Sgirish 
174744961713Sgirish 	/*
174844961713Sgirish 	 * This interrupt handler is for a specific
174944961713Sgirish 	 * receive dma channel.
175044961713Sgirish 	 */
175144961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
175244961713Sgirish 	/*
175344961713Sgirish 	 * Get the control and status for this channel.
175444961713Sgirish 	 */
175544961713Sgirish 	channel = ldvp->channel;
175644961713Sgirish 	ldgp = ldvp->ldgp;
175744961713Sgirish 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
175844961713Sgirish 
175944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
176044961713Sgirish 		"cs 0x%016llx rcrto 0x%x rcrthres %x",
176144961713Sgirish 		channel,
176244961713Sgirish 		cs.value,
176344961713Sgirish 		cs.bits.hdw.rcrto,
176444961713Sgirish 		cs.bits.hdw.rcrthres));
176544961713Sgirish 
176644961713Sgirish 	nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs);
176744961713Sgirish 	serviced = DDI_INTR_CLAIMED;
176844961713Sgirish 
176944961713Sgirish 	/* error events. */
177044961713Sgirish 	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
177144961713Sgirish 		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
177244961713Sgirish 	}
177344961713Sgirish 
177444961713Sgirish nxge_intr_exit:
177544961713Sgirish 
177644961713Sgirish 
177744961713Sgirish 	/*
177844961713Sgirish 	 * Enable the mailbox update interrupt if we want
177944961713Sgirish 	 * to use the mailbox. We probably don't need the
178044961713Sgirish 	 * mailbox as it only saves us one PIO read.
178144961713Sgirish 	 * Also write 1 to rcrthres and rcrto to clear
178244961713Sgirish 	 * these two edge-triggered bits.
178344961713Sgirish 	 */
178444961713Sgirish 
178544961713Sgirish 	cs.value &= RX_DMA_CTL_STAT_WR1C;
178644961713Sgirish 	cs.bits.hdw.mex = 1;
178744961713Sgirish 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
178844961713Sgirish 			cs.value);
178944961713Sgirish 
179044961713Sgirish 	/*
179144961713Sgirish 	 * Rearm this logical group if this is a single device
179244961713Sgirish 	 * group.
179344961713Sgirish 	 */
179444961713Sgirish 	if (ldgp->nldvs == 1) {
179544961713Sgirish 		ldgimgm_t		mgm;
179644961713Sgirish 		mgm.value = 0;
179744961713Sgirish 		mgm.bits.ldw.arm = 1;
179844961713Sgirish 		mgm.bits.ldw.timer = ldgp->ldg_timer;
179944961713Sgirish 		NXGE_REG_WR64(handle,
180044961713Sgirish 			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
180144961713Sgirish 			    mgm.value);
180244961713Sgirish 	}
180344961713Sgirish 
180444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d",
180544961713Sgirish 		serviced));
180644961713Sgirish 	return (serviced);
180744961713Sgirish }
180844961713Sgirish 
180944961713Sgirish /*
181044961713Sgirish  * Process the packets received in the specified logical device
181144961713Sgirish  * and pass up a chain of message blocks to the upper layer.
181244961713Sgirish  */
181344961713Sgirish static void
181444961713Sgirish nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
181514ea4bb7Ssd 				    rx_dma_ctl_stat_t cs)
181644961713Sgirish {
181744961713Sgirish 	p_mblk_t		mp;
181844961713Sgirish 	p_rx_rcr_ring_t		rcrp;
181944961713Sgirish 
182044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
182144961713Sgirish 	if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
182244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
182344961713Sgirish 			"<== nxge_rx_pkts_vring: no mp"));
182444961713Sgirish 		return;
182544961713Sgirish 	}
182644961713Sgirish 
182744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
182844961713Sgirish 		mp));
182944961713Sgirish 
183044961713Sgirish #ifdef  NXGE_DEBUG
183144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
183244961713Sgirish 			"==> nxge_rx_pkts_vring:calling mac_rx "
183314ea4bb7Ssd 			"LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
183444961713Sgirish 			"mac_handle $%p",
183514ea4bb7Ssd 			mp->b_wptr - mp->b_rptr,
183614ea4bb7Ssd 			mp, mp->b_cont, mp->b_next,
183744961713Sgirish 			rcrp, rcrp->rcr_mac_handle));
183844961713Sgirish 
183944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
184044961713Sgirish 			"==> nxge_rx_pkts_vring: dump packets "
184144961713Sgirish 			"(mp $%p b_rptr $%p b_wptr $%p):\n %s",
184244961713Sgirish 			mp,
184344961713Sgirish 			mp->b_rptr,
184444961713Sgirish 			mp->b_wptr,
184514ea4bb7Ssd 			nxge_dump_packet((char *)mp->b_rptr,
184614ea4bb7Ssd 			mp->b_wptr - mp->b_rptr)));
184714ea4bb7Ssd 		if (mp->b_cont) {
184814ea4bb7Ssd 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
184914ea4bb7Ssd 				"==> nxge_rx_pkts_vring: dump b_cont packets "
185014ea4bb7Ssd 				"(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
185114ea4bb7Ssd 				mp->b_cont,
185214ea4bb7Ssd 				mp->b_cont->b_rptr,
185314ea4bb7Ssd 				mp->b_cont->b_wptr,
185414ea4bb7Ssd 				nxge_dump_packet((char *)mp->b_cont->b_rptr,
185514ea4bb7Ssd 				mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
185614ea4bb7Ssd 		}
185744961713Sgirish 		if (mp->b_next) {
185844961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
185944961713Sgirish 				"==> nxge_rx_pkts_vring: dump next packets "
186044961713Sgirish 				"(b_rptr $%p): %s",
186144961713Sgirish 				mp->b_next->b_rptr,
186244961713Sgirish 				nxge_dump_packet((char *)mp->b_next->b_rptr,
186314ea4bb7Ssd 				mp->b_next->b_wptr - mp->b_next->b_rptr)));
186444961713Sgirish 		}
186544961713Sgirish #endif
186644961713Sgirish 
186744961713Sgirish 	mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
186844961713Sgirish }
186944961713Sgirish 
187044961713Sgirish 
187144961713Sgirish /*
187244961713Sgirish  * This routine is the main packet receive processing function.
187344961713Sgirish  * It gets the packet type, error code, and buffer-related
187444961713Sgirish  * information from the receive completion entry.
187544961713Sgirish  * How many completion entries to process is based on the number of packets
187644961713Sgirish  * queued by the hardware, a hardware-maintained tail pointer
187744961713Sgirish  * and a configurable receive packet count.
187844961713Sgirish  *
187944961713Sgirish  * A chain of message blocks will be created as a result of processing
188044961713Sgirish  * the completion entries. This chain of message blocks will be returned and
188144961713Sgirish  * a hardware control status register will be updated with the number of
188244961713Sgirish  * packets that were removed from the hardware queue.
188344961713Sgirish  *
188444961713Sgirish  */
188544961713Sgirish mblk_t *
188644961713Sgirish nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
188744961713Sgirish     p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs)
188844961713Sgirish {
188944961713Sgirish 	npi_handle_t		handle;
189044961713Sgirish 	uint8_t			channel;
189144961713Sgirish 	p_rx_rcr_rings_t	rx_rcr_rings;
189244961713Sgirish 	p_rx_rcr_ring_t		rcr_p;
189344961713Sgirish 	uint32_t		comp_rd_index;
189444961713Sgirish 	p_rcr_entry_t		rcr_desc_rd_head_p;
189544961713Sgirish 	p_rcr_entry_t		rcr_desc_rd_head_pp;
189644961713Sgirish 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
189744961713Sgirish 	uint16_t		qlen, nrcr_read, npkt_read;
189844961713Sgirish 	uint32_t qlen_hw;
189944961713Sgirish 	boolean_t		multi;
190014ea4bb7Ssd 	rcrcfig_b_t rcr_cfg_b;
1901a3c5bd6dSspeer #if defined(_BIG_ENDIAN)
190244961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
190344961713Sgirish #endif
190444961713Sgirish 
190544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d "
190644961713Sgirish 		"channel %d", vindex, ldvp->channel));
190744961713Sgirish 
190844961713Sgirish 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
190944961713Sgirish 		return (NULL);
191044961713Sgirish 	}
191144961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
191244961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
191344961713Sgirish 	rcr_p = rx_rcr_rings->rcr_rings[vindex];
191444961713Sgirish 	channel = rcr_p->rdc;
191544961713Sgirish 	if (channel != ldvp->channel) {
191644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
191744961713Sgirish 			"channel %d and rcr channel %d do not match.",
191844961713Sgirish 			vindex, ldvp->channel, channel));
191944961713Sgirish 		return (NULL);
192044961713Sgirish 	}
192144961713Sgirish 
192244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
192344961713Sgirish 		"==> nxge_rx_pkts: START: rcr channel %d "
192444961713Sgirish 		"head_p $%p head_pp $%p  index %d ",
192544961713Sgirish 		channel, rcr_p->rcr_desc_rd_head_p,
192644961713Sgirish 		rcr_p->rcr_desc_rd_head_pp,
192744961713Sgirish 		rcr_p->comp_rd_index));
192844961713Sgirish 
192944961713Sgirish 
1930a3c5bd6dSspeer #if !defined(_BIG_ENDIAN)
193144961713Sgirish 	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
193244961713Sgirish #else
193344961713Sgirish 	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
193444961713Sgirish 	if (rs != NPI_SUCCESS) {
193544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
193644961713Sgirish 		"channel %d, get qlen failed 0x%08x",
193744961713Sgirish 		vindex, ldvp->channel, rs));
193844961713Sgirish 		return (NULL);
193944961713Sgirish 	}
194044961713Sgirish #endif
194144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
194244961713Sgirish 		"qlen %d", channel, qlen));
194344961713Sgirish 
194444961713Sgirish 
194544961713Sgirish 
194644961713Sgirish 	if (!qlen) {
194744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
194844961713Sgirish 			"==> nxge_rx_pkts:rcr channel %d "
194944961713Sgirish 			"qlen %d (no pkts)", channel, qlen));
195044961713Sgirish 
195144961713Sgirish 		return (NULL);
195244961713Sgirish 	}
195344961713Sgirish 
195444961713Sgirish 	comp_rd_index = rcr_p->comp_rd_index;
195544961713Sgirish 
195644961713Sgirish 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
195744961713Sgirish 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
195844961713Sgirish 	nrcr_read = npkt_read = 0;
195944961713Sgirish 
196044961713Sgirish 	/*
196144961713Sgirish 	 * Number of packets queued
196244961713Sgirish 	 * (A jumbo or multi-segment packet will be counted as only one
196344961713Sgirish 	 *  packet and it may take up more than one completion entry).
196444961713Sgirish 	 */
196544961713Sgirish 	qlen_hw = (qlen < nxge_max_rx_pkts) ?
196644961713Sgirish 		qlen : nxge_max_rx_pkts;
196744961713Sgirish 	head_mp = NULL;
196844961713Sgirish 	tail_mp = &head_mp;
196944961713Sgirish 	nmp = mp_cont = NULL;
197044961713Sgirish 	multi = B_FALSE;
197144961713Sgirish 
1972a3c5bd6dSspeer 	while (qlen_hw) {
197344961713Sgirish 
197444961713Sgirish #ifdef NXGE_DEBUG
197544961713Sgirish 		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
197644961713Sgirish #endif
197744961713Sgirish 		/*
197844961713Sgirish 		 * Process one completion ring entry.
197944961713Sgirish 		 */
198044961713Sgirish 		nxge_receive_packet(nxgep,
198144961713Sgirish 			rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
198244961713Sgirish 
198344961713Sgirish 		/*
198444961713Sgirish 		 * Message chaining modes: a frame that fits in a single
		 * buffer is linked onto the return chain with b_next;
		 * a multi-buffer frame is assembled as a b_cont chain
		 * headed by its first segment.
198544961713Sgirish 		 */
198614ea4bb7Ssd 		if (nmp) {
198744961713Sgirish 			nmp->b_next = NULL;
198814ea4bb7Ssd 			if (!multi && !mp_cont) { /* frame fits a partition */
198914ea4bb7Ssd 				*tail_mp = nmp;
199014ea4bb7Ssd 				tail_mp = &nmp->b_next;
199114ea4bb7Ssd 				nmp = NULL;
199214ea4bb7Ssd 			} else if (multi && !mp_cont) { /* first segment */
199314ea4bb7Ssd 				*tail_mp = nmp;
199414ea4bb7Ssd 				tail_mp = &nmp->b_cont;
199514ea4bb7Ssd 			} else if (multi && mp_cont) {	/* mid of multi segs */
199614ea4bb7Ssd 				*tail_mp = mp_cont;
199714ea4bb7Ssd 				tail_mp = &mp_cont->b_cont;
199814ea4bb7Ssd 			} else if (!multi && mp_cont) { /* last segment */
1999a3c5bd6dSspeer 				*tail_mp = mp_cont;
200014ea4bb7Ssd 				tail_mp = &nmp->b_next;
200114ea4bb7Ssd 				nmp = NULL;
200214ea4bb7Ssd 			}
200344961713Sgirish 		}
200444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
200544961713Sgirish 			"==> nxge_rx_pkts: loop: rcr channel %d "
200644961713Sgirish 			"before updating: multi %d "
200744961713Sgirish 			"nrcr_read %d "
200844961713Sgirish 			"npk read %d "
200944961713Sgirish 			"head_pp $%p  index %d ",
201044961713Sgirish 			channel,
201144961713Sgirish 			multi,
201244961713Sgirish 			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
201344961713Sgirish 			comp_rd_index));
201444961713Sgirish 
201544961713Sgirish 		if (!multi) {
201644961713Sgirish 			qlen_hw--;
201744961713Sgirish 			npkt_read++;
201844961713Sgirish 		}
201944961713Sgirish 
202044961713Sgirish 		/*
202144961713Sgirish 		 * Update the next read entry.
202244961713Sgirish 		 */
202344961713Sgirish 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
202444961713Sgirish 					rcr_p->comp_wrap_mask);
202544961713Sgirish 
202644961713Sgirish 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
202744961713Sgirish 				rcr_p->rcr_desc_first_p,
202844961713Sgirish 				rcr_p->rcr_desc_last_p);
202944961713Sgirish 
203044961713Sgirish 		nrcr_read++;
203144961713Sgirish 
203244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
203344961713Sgirish 			"<== nxge_rx_pkts: (SAM, process one packet) "
203444961713Sgirish 			"nrcr_read %d",
203544961713Sgirish 			nrcr_read));
203644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
203744961713Sgirish 			"==> nxge_rx_pkts: loop: rcr channel %d "
203844961713Sgirish 			"multi %d "
203944961713Sgirish 			"nrcr_read %d "
204044961713Sgirish 			"npk read %d "
204144961713Sgirish 			"head_pp $%p  index %d ",
204244961713Sgirish 			channel,
204344961713Sgirish 			multi,
204444961713Sgirish 			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
204544961713Sgirish 			comp_rd_index));
204644961713Sgirish 
204744961713Sgirish 	}
204844961713Sgirish 
204944961713Sgirish 	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
205044961713Sgirish 	rcr_p->comp_rd_index = comp_rd_index;
205144961713Sgirish 	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
205244961713Sgirish 
205314ea4bb7Ssd 	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
205414ea4bb7Ssd 		(nxgep->intr_threshold != rcr_p->intr_threshold)) {
205514ea4bb7Ssd 		rcr_p->intr_timeout = nxgep->intr_timeout;
205614ea4bb7Ssd 		rcr_p->intr_threshold = nxgep->intr_threshold;
205714ea4bb7Ssd 		rcr_cfg_b.value = 0x0ULL;
205814ea4bb7Ssd 		if (rcr_p->intr_timeout)
205914ea4bb7Ssd 			rcr_cfg_b.bits.ldw.entout = 1;
206014ea4bb7Ssd 		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
206114ea4bb7Ssd 		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
206214ea4bb7Ssd 		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
206314ea4bb7Ssd 				    channel, rcr_cfg_b.value);
206414ea4bb7Ssd 	}
206544961713Sgirish 
206644961713Sgirish 	cs.bits.ldw.pktread = npkt_read;
206744961713Sgirish 	cs.bits.ldw.ptrread = nrcr_read;
206844961713Sgirish 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
206944961713Sgirish 			    channel, cs.value);
207044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
207144961713Sgirish 		"==> nxge_rx_pkts: EXIT: rcr channel %d "
207244961713Sgirish 		"head_pp $%p  index %016llx ",
207344961713Sgirish 		channel,
207444961713Sgirish 		rcr_p->rcr_desc_rd_head_pp,
207544961713Sgirish 		rcr_p->comp_rd_index));
207644961713Sgirish 	/*
207744961713Sgirish 	 * Update RCR buffer pointer read and number of packets
207844961713Sgirish 	 * read.
207944961713Sgirish 	 */
208044961713Sgirish 
208144961713Sgirish 	*rcrp = rcr_p;
208244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts"));
208344961713Sgirish 	return (head_mp);
208444961713Sgirish }
208544961713Sgirish 
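/*
 * nxge_receive_packet():
 *	Translate one completion (RCR) entry into an mblk. The entry's
 *	fields (multi, error, L2 length, buffer size code, packet
 *	buffer address) are decoded, the hardware buffer address is
 *	mapped back to its kernel buffer and rx_msg_t, and the data is
 *	either loaned up (nxge_dupb) or copied (nxge_dupb_bcopy). The
 *	routine maintains per-buffer usage counts, per-channel error
 *	and byte/packet statistics, and sets the hardware checksum
 *	flags for clean, unfragmented TCP/UDP packets. *multi_p is set
 *	when the frame continues in the next completion entry.
 */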
208644961713Sgirish void
208744961713Sgirish nxge_receive_packet(p_nxge_t nxgep,
208844961713Sgirish     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
208944961713Sgirish     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
209044961713Sgirish {
209144961713Sgirish 	p_mblk_t		nmp = NULL;
209244961713Sgirish 	uint64_t		multi;
209344961713Sgirish 	uint64_t		dcf_err;
209444961713Sgirish 	uint8_t			channel;
209544961713Sgirish 
209644961713Sgirish 	boolean_t		first_entry = B_TRUE;
209744961713Sgirish 	boolean_t		is_tcp_udp = B_FALSE;
209844961713Sgirish 	boolean_t		buffer_free = B_FALSE;
209944961713Sgirish 	boolean_t		error_send_up = B_FALSE;
210044961713Sgirish 	uint8_t			error_type;
210144961713Sgirish 	uint16_t		l2_len;
210244961713Sgirish 	uint16_t		skip_len;
210344961713Sgirish 	uint8_t			pktbufsz_type;
210444961713Sgirish 	uint64_t		rcr_entry;
210544961713Sgirish 	uint64_t		*pkt_buf_addr_pp;
210644961713Sgirish 	uint64_t		*pkt_buf_addr_p;
210744961713Sgirish 	uint32_t		buf_offset;
210844961713Sgirish 	uint32_t		bsize;
210944961713Sgirish 	uint32_t		error_disp_cnt;
211044961713Sgirish 	uint32_t		msg_index;
211144961713Sgirish 	p_rx_rbr_ring_t		rx_rbr_p;
211244961713Sgirish 	p_rx_msg_t 		*rx_msg_ring_p;
211344961713Sgirish 	p_rx_msg_t		rx_msg_p;
211444961713Sgirish 	uint16_t		sw_offset_bytes = 0, hdr_size = 0;
211544961713Sgirish 	nxge_status_t		status = NXGE_OK;
211644961713Sgirish 	boolean_t		is_valid = B_FALSE;
211744961713Sgirish 	p_nxge_rx_ring_stats_t	rdc_stats;
2118a3c5bd6dSspeer 	uint32_t		bytes_read;
2119a3c5bd6dSspeer 	uint64_t		pkt_type;
2120a3c5bd6dSspeer 	uint64_t		frag;
212144961713Sgirish #ifdef	NXGE_DEBUG
212244961713Sgirish 	int			dump_len;
212344961713Sgirish #endif
212444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
212544961713Sgirish 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
212644961713Sgirish 
212744961713Sgirish 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
212844961713Sgirish 
212944961713Sgirish 	multi = (rcr_entry & RCR_MULTI_MASK);
213044961713Sgirish 	dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
213144961713Sgirish 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
213244961713Sgirish 
213344961713Sgirish 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
213444961713Sgirish 	frag = (rcr_entry & RCR_FRAG_MASK);
213544961713Sgirish 
213644961713Sgirish 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
213744961713Sgirish 
213844961713Sgirish 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
213944961713Sgirish 				RCR_PKTBUFSZ_SHIFT);
2140adfcba55Sjoycey #if defined(__i386)
2141adfcba55Sjoycey 	pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
2142adfcba55Sjoycey 			RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
2143adfcba55Sjoycey #else
214444961713Sgirish 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
214544961713Sgirish 			RCR_PKT_BUF_ADDR_SHIFT);
2146adfcba55Sjoycey #endif
214744961713Sgirish 
214844961713Sgirish 	channel = rcr_p->rdc;
214944961713Sgirish 
215044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
215144961713Sgirish 		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
215214ea4bb7Ssd 		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
215344961713Sgirish 		"error_type 0x%x pkt_type 0x%x  "
215444961713Sgirish 		"pktbufsz_type %d ",
215544961713Sgirish 		rcr_desc_rd_head_p,
215644961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len,
215744961713Sgirish 		multi,
215844961713Sgirish 		error_type,
215944961713Sgirish 		pkt_type,
216044961713Sgirish 		pktbufsz_type));
216144961713Sgirish 
216244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
216344961713Sgirish 		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
216414ea4bb7Ssd 		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
216544961713Sgirish 		"error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
216644961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len,
216744961713Sgirish 		multi,
216844961713Sgirish 		error_type,
216944961713Sgirish 		pkt_type));
217044961713Sgirish 
217144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
217244961713Sgirish 		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
217344961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
217444961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
217544961713Sgirish 
217644961713Sgirish 	/* get the stats ptr */
217744961713Sgirish 	rdc_stats = rcr_p->rdc_stats;
217844961713Sgirish 
217944961713Sgirish 	if (!l2_len) {
218044961713Sgirish 
218144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
218244961713Sgirish 			"<== nxge_receive_packet: failed: l2 length is 0."));
218344961713Sgirish 		return;
218444961713Sgirish 	}
218544961713Sgirish 
218656d930aeSspeer 	/* Hardware sends us 4 bytes of CRC as no stripping is done.  */
218756d930aeSspeer 	l2_len -= ETHERFCSL;
218856d930aeSspeer 
218944961713Sgirish 	/* shift 6 bits to get the full io address */
2190adfcba55Sjoycey #if defined(__i386)
2191adfcba55Sjoycey 	pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
2192adfcba55Sjoycey 				RCR_PKT_BUF_ADDR_SHIFT_FULL);
2193adfcba55Sjoycey #else
219444961713Sgirish 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
219544961713Sgirish 				RCR_PKT_BUF_ADDR_SHIFT_FULL);
2196adfcba55Sjoycey #endif
219744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
219844961713Sgirish 		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
219944961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
220044961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
220144961713Sgirish 
220244961713Sgirish 	rx_rbr_p = rcr_p->rx_rbr_p;
220344961713Sgirish 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
220444961713Sgirish 
220544961713Sgirish 	if (first_entry) {
220644961713Sgirish 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
220744961713Sgirish 			RXDMA_HDR_SIZE_DEFAULT);
220844961713Sgirish 
220944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
221044961713Sgirish 			"==> nxge_receive_packet: first entry 0x%016llx "
221144961713Sgirish 			"pkt_buf_addr_pp $%p l2_len %d hdr %d",
221244961713Sgirish 			rcr_entry, pkt_buf_addr_pp, l2_len,
221344961713Sgirish 			hdr_size));
221444961713Sgirish 	}
221544961713Sgirish 
221644961713Sgirish 	MUTEX_ENTER(&rcr_p->lock);
221744961713Sgirish 	MUTEX_ENTER(&rx_rbr_p->lock);
221844961713Sgirish 
221944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
222044961713Sgirish 		"==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
222144961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
222244961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
222344961713Sgirish 
222444961713Sgirish 	/*
222544961713Sgirish 	 * Packet buffer address in the completion entry points
222644961713Sgirish 	 * to the starting buffer address (offset 0).
222744961713Sgirish 	 * Use the starting buffer address to locate the corresponding
222844961713Sgirish 	 * kernel address.
222944961713Sgirish 	 */
223044961713Sgirish 	status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
223144961713Sgirish 			pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
223244961713Sgirish 			&buf_offset,
223344961713Sgirish 			&msg_index);
223444961713Sgirish 
223544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
223644961713Sgirish 		"==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
223744961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
223844961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
223944961713Sgirish 
224044961713Sgirish 	if (status != NXGE_OK) {
224144961713Sgirish 		MUTEX_EXIT(&rx_rbr_p->lock);
224244961713Sgirish 		MUTEX_EXIT(&rcr_p->lock);
224344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
224444961713Sgirish 			"<== nxge_receive_packet: found vaddr failed %d",
224544961713Sgirish 				status));
224644961713Sgirish 		return;
224744961713Sgirish 	}
224844961713Sgirish 
224944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
225044961713Sgirish 		"==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
225144961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
225244961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
225344961713Sgirish 
225444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
225544961713Sgirish 		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
225644961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
225744961713Sgirish 		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
225844961713Sgirish 
225944961713Sgirish 	rx_msg_p = rx_msg_ring_p[msg_index];
226044961713Sgirish 
226144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
226244961713Sgirish 		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
226344961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
226444961713Sgirish 		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
226544961713Sgirish 
226644961713Sgirish 	switch (pktbufsz_type) {
226744961713Sgirish 	case RCR_PKTBUFSZ_0:
226844961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
226944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
227044961713Sgirish 			"==> nxge_receive_packet: 0 buf %d", bsize));
227144961713Sgirish 		break;
227244961713Sgirish 	case RCR_PKTBUFSZ_1:
227344961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
227444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
227544961713Sgirish 			"==> nxge_receive_packet: 1 buf %d", bsize));
227644961713Sgirish 		break;
227744961713Sgirish 	case RCR_PKTBUFSZ_2:
227844961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
227944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
228044961713Sgirish 			"==> nxge_receive_packet: 2 buf %d", bsize));
228144961713Sgirish 		break;
228244961713Sgirish 	case RCR_SINGLE_BLOCK:
228344961713Sgirish 		bsize = rx_msg_p->block_size;
228444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
228544961713Sgirish 			"==> nxge_receive_packet: single %d", bsize));
228644961713Sgirish 
228744961713Sgirish 		break;
228844961713Sgirish 	default:
228944961713Sgirish 		MUTEX_EXIT(&rx_rbr_p->lock);
229044961713Sgirish 		MUTEX_EXIT(&rcr_p->lock);
229144961713Sgirish 		return;
229244961713Sgirish 	}
229344961713Sgirish 
229444961713Sgirish 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
229544961713Sgirish 		(buf_offset + sw_offset_bytes),
229644961713Sgirish 		(hdr_size + l2_len),
229744961713Sgirish 		DDI_DMA_SYNC_FORCPU);
229844961713Sgirish 
229944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
230044961713Sgirish 		"==> nxge_receive_packet: after first dump:usage count"));
230144961713Sgirish 
230244961713Sgirish 	if (rx_msg_p->cur_usage_cnt == 0) {
230344961713Sgirish 		if (rx_rbr_p->rbr_use_bcopy) {
230444961713Sgirish 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
230544961713Sgirish 			if (rx_rbr_p->rbr_consumed <
230644961713Sgirish 					rx_rbr_p->rbr_threshold_hi) {
230744961713Sgirish 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
230844961713Sgirish 					((rx_rbr_p->rbr_consumed >=
230944961713Sgirish 						rx_rbr_p->rbr_threshold_lo) &&
231044961713Sgirish 						(rx_rbr_p->rbr_bufsize_type >=
231144961713Sgirish 							pktbufsz_type))) {
231244961713Sgirish 					rx_msg_p->rx_use_bcopy = B_TRUE;
231344961713Sgirish 				}
231444961713Sgirish 			} else {
231544961713Sgirish 				rx_msg_p->rx_use_bcopy = B_TRUE;
231644961713Sgirish 			}
231744961713Sgirish 		}
231844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
231944961713Sgirish 			"==> nxge_receive_packet: buf %d (new block) ",
232044961713Sgirish 			bsize));
232144961713Sgirish 
232244961713Sgirish 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
232344961713Sgirish 		rx_msg_p->pkt_buf_size = bsize;
232444961713Sgirish 		rx_msg_p->cur_usage_cnt = 1;
232544961713Sgirish 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
232644961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
232744961713Sgirish 				"==> nxge_receive_packet: buf %d "
232844961713Sgirish 				"(single block) ",
232944961713Sgirish 				bsize));
233044961713Sgirish 			/*
233144961713Sgirish 			 * Buffer can be reused once the free function
233244961713Sgirish 			 * is called.
233344961713Sgirish 			 */
233444961713Sgirish 			rx_msg_p->max_usage_cnt = 1;
233544961713Sgirish 			buffer_free = B_TRUE;
233644961713Sgirish 		} else {
233744961713Sgirish 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
233844961713Sgirish 			if (rx_msg_p->max_usage_cnt == 1) {
233944961713Sgirish 				buffer_free = B_TRUE;
234044961713Sgirish 			}
234144961713Sgirish 		}
234244961713Sgirish 	} else {
234344961713Sgirish 		rx_msg_p->cur_usage_cnt++;
234444961713Sgirish 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
234544961713Sgirish 			buffer_free = B_TRUE;
234644961713Sgirish 		}
234744961713Sgirish 	}
234844961713Sgirish 
234944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
235044961713Sgirish 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
235144961713Sgirish 		msg_index, l2_len,
235244961713Sgirish 		rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
235344961713Sgirish 
235444961713Sgirish 	if ((error_type) || (dcf_err)) {
235544961713Sgirish 		rdc_stats->ierrors++;
235644961713Sgirish 		if (dcf_err) {
235744961713Sgirish 			rdc_stats->dcf_err++;
235844961713Sgirish #ifdef	NXGE_DEBUG
235944961713Sgirish 			if (!rdc_stats->dcf_err) {
236044961713Sgirish 				NXGE_DEBUG_MSG((nxgep, RX_CTL,
236144961713Sgirish 				"nxge_receive_packet: channel %d dcf_err rcr"
236244961713Sgirish 				" 0x%llx", channel, rcr_entry));
236344961713Sgirish 			}
236444961713Sgirish #endif
236544961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
236644961713Sgirish 					NXGE_FM_EREPORT_RDMC_DCF_ERR);
236744961713Sgirish 		} else {
236844961713Sgirish 				/* Update error stats */
236944961713Sgirish 			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
237044961713Sgirish 			rdc_stats->errlog.compl_err_type = error_type;
237144961713Sgirish 
237244961713Sgirish 			switch (error_type) {
2373f6485eecSyc 			/*
2374f6485eecSyc 			 * Do not send FMA ereport for RCR_L2_ERROR and
2375f6485eecSyc 			 * RCR_L4_CSUM_ERROR because most likely they indicate
2376f6485eecSyc 			 * back pressure rather than HW failures.
2377f6485eecSyc 			 */
237853f3d8ecSyc 			case RCR_L2_ERROR:
237953f3d8ecSyc 				rdc_stats->l2_err++;
238053f3d8ecSyc 				if (rdc_stats->l2_err <
238153f3d8ecSyc 				    error_disp_cnt) {
238244961713Sgirish 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
238353f3d8ecSyc 					    " nxge_receive_packet:"
238453f3d8ecSyc 					    " channel %d RCR L2_ERROR",
238553f3d8ecSyc 					    channel));
238653f3d8ecSyc 				}
238753f3d8ecSyc 				break;
238853f3d8ecSyc 			case RCR_L4_CSUM_ERROR:
238953f3d8ecSyc 				error_send_up = B_TRUE;
239053f3d8ecSyc 				rdc_stats->l4_cksum_err++;
239153f3d8ecSyc 				if (rdc_stats->l4_cksum_err <
239253f3d8ecSyc 				    error_disp_cnt) {
239353f3d8ecSyc 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
239453f3d8ecSyc 					    " nxge_receive_packet:"
239553f3d8ecSyc 					    " channel %d"
239653f3d8ecSyc 					    " RCR L4_CSUM_ERROR", channel));
239753f3d8ecSyc 				}
239853f3d8ecSyc 				break;
2399f6485eecSyc 			/*
2400f6485eecSyc 			 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2401f6485eecSyc 			 * RCR_ZCP_SOFT_ERROR because they reflect the same
2402f6485eecSyc 			 * FFLP and ZCP errors that have been reported by
2403f6485eecSyc 			 * nxge_fflp.c and nxge_zcp.c.
2404f6485eecSyc 			 */
240553f3d8ecSyc 			case RCR_FFLP_SOFT_ERROR:
240653f3d8ecSyc 				error_send_up = B_TRUE;
240753f3d8ecSyc 				rdc_stats->fflp_soft_err++;
240853f3d8ecSyc 				if (rdc_stats->fflp_soft_err <
240953f3d8ecSyc 				    error_disp_cnt) {
241053f3d8ecSyc 					NXGE_ERROR_MSG((nxgep,
241153f3d8ecSyc 					    NXGE_ERR_CTL,
241253f3d8ecSyc 					    " nxge_receive_packet:"
241353f3d8ecSyc 					    " channel %d"
241453f3d8ecSyc 					    " RCR FFLP_SOFT_ERROR", channel));
241553f3d8ecSyc 				}
241653f3d8ecSyc 				break;
241753f3d8ecSyc 			case RCR_ZCP_SOFT_ERROR:
241853f3d8ecSyc 				error_send_up = B_TRUE;
241953f3d8ecSyc 				rdc_stats->zcp_soft_err++;
242053f3d8ecSyc 				if (rdc_stats->zcp_soft_err <
242153f3d8ecSyc 				    error_disp_cnt)
242253f3d8ecSyc 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
242353f3d8ecSyc 					    " nxge_receive_packet: Channel %d"
242453f3d8ecSyc 					    " RCR ZCP_SOFT_ERROR", channel));
242553f3d8ecSyc 				break;
242653f3d8ecSyc 			default:
242753f3d8ecSyc 				rdc_stats->rcr_unknown_err++;
242853f3d8ecSyc 				if (rdc_stats->rcr_unknown_err
242953f3d8ecSyc 				    < error_disp_cnt) {
243053f3d8ecSyc 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
243153f3d8ecSyc 					    " nxge_receive_packet: Channel %d"
243253f3d8ecSyc 					    " RCR entry 0x%llx error 0x%x",
243353f3d8ecSyc 					    channel, rcr_entry, error_type));
243453f3d8ecSyc 				}
243553f3d8ecSyc 				break;
243644961713Sgirish 			}
243744961713Sgirish 		}
243844961713Sgirish 
243944961713Sgirish 		/*
244044961713Sgirish 		 * Update and repost buffer block if max usage
244144961713Sgirish 		 * count is reached.
244244961713Sgirish 		 */
244344961713Sgirish 		if (error_send_up == B_FALSE) {
2444958cea9eSml 			atomic_inc_32(&rx_msg_p->ref_cnt);
2445958cea9eSml 			atomic_inc_32(&nxge_mblks_pending);
244644961713Sgirish 			if (buffer_free == B_TRUE) {
244744961713Sgirish 				rx_msg_p->free = B_TRUE;
244844961713Sgirish 			}
244944961713Sgirish 
245044961713Sgirish 			MUTEX_EXIT(&rx_rbr_p->lock);
245144961713Sgirish 			MUTEX_EXIT(&rcr_p->lock);
245244961713Sgirish 			nxge_freeb(rx_msg_p);
245344961713Sgirish 			return;
245444961713Sgirish 		}
245544961713Sgirish 	}
245644961713Sgirish 
245744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
245844961713Sgirish 		"==> nxge_receive_packet: DMA sync second "));
245944961713Sgirish 
246053f3d8ecSyc 	bytes_read = rcr_p->rcvd_pkt_bytes;
246144961713Sgirish 	skip_len = sw_offset_bytes + hdr_size;
246244961713Sgirish 	if (!rx_msg_p->rx_use_bcopy) {
2463958cea9eSml 		/*
2464958cea9eSml 		 * For loaned-up buffers, the driver reference count
2465958cea9eSml 		 * is incremented first and then the free state is set.
2466958cea9eSml 		 */
246753f3d8ecSyc 		if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
246814ea4bb7Ssd 			if (first_entry) {
246914ea4bb7Ssd 				nmp->b_rptr = &nmp->b_rptr[skip_len];
247053f3d8ecSyc 				if (l2_len < bsize - skip_len) {
247114ea4bb7Ssd 					nmp->b_wptr = &nmp->b_rptr[l2_len];
247253f3d8ecSyc 				} else {
247353f3d8ecSyc 					nmp->b_wptr = &nmp->b_rptr[bsize
247453f3d8ecSyc 					    - skip_len];
247553f3d8ecSyc 				}
247614ea4bb7Ssd 			} else {
247753f3d8ecSyc 				if (l2_len - bytes_read < bsize) {
247814ea4bb7Ssd 					nmp->b_wptr =
247914ea4bb7Ssd 					    &nmp->b_rptr[l2_len - bytes_read];
248053f3d8ecSyc 				} else {
248153f3d8ecSyc 					nmp->b_wptr = &nmp->b_rptr[bsize];
248253f3d8ecSyc 				}
248314ea4bb7Ssd 			}
248444961713Sgirish 		}
248553f3d8ecSyc 	} else {
248653f3d8ecSyc 		if (first_entry) {
248753f3d8ecSyc 			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
248853f3d8ecSyc 			    l2_len < bsize - skip_len ?
248953f3d8ecSyc 			    l2_len : bsize - skip_len);
249053f3d8ecSyc 		} else {
249153f3d8ecSyc 			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
249253f3d8ecSyc 			    l2_len - bytes_read < bsize ?
249353f3d8ecSyc 			    l2_len - bytes_read : bsize);
249453f3d8ecSyc 		}
249553f3d8ecSyc 	}
249653f3d8ecSyc 	if (nmp != NULL) {
249753f3d8ecSyc 		if (first_entry)
249853f3d8ecSyc 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
249953f3d8ecSyc 		else
250053f3d8ecSyc 			bytes_read += nmp->b_wptr - nmp->b_rptr;
250153f3d8ecSyc 
250253f3d8ecSyc 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
250353f3d8ecSyc 		    "==> nxge_receive_packet after dupb: "
250453f3d8ecSyc 		    "rbr consumed %d "
250553f3d8ecSyc 		    "pktbufsz_type %d "
250653f3d8ecSyc 		    "nmp $%p rptr $%p wptr $%p "
250753f3d8ecSyc 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
250853f3d8ecSyc 		    rx_rbr_p->rbr_consumed,
250953f3d8ecSyc 		    pktbufsz_type,
251053f3d8ecSyc 		    nmp, nmp->b_rptr, nmp->b_wptr,
251153f3d8ecSyc 		    buf_offset, bsize, l2_len, skip_len));
251244961713Sgirish 	} else {
251344961713Sgirish 		cmn_err(CE_WARN, "!nxge_receive_packet: "
251444961713Sgirish 			"update stats (error)");
25152e59129aSraghus 		atomic_inc_32(&rx_msg_p->ref_cnt);
25162e59129aSraghus 		atomic_inc_32(&nxge_mblks_pending);
25172e59129aSraghus 		if (buffer_free == B_TRUE) {
25182e59129aSraghus 			rx_msg_p->free = B_TRUE;
25192e59129aSraghus 		}
25202e59129aSraghus 		MUTEX_EXIT(&rx_rbr_p->lock);
25212e59129aSraghus 		MUTEX_EXIT(&rcr_p->lock);
25222e59129aSraghus 		nxge_freeb(rx_msg_p);
25232e59129aSraghus 		return;
252444961713Sgirish 	}
2525ee5416c9Syc 
252644961713Sgirish 	if (buffer_free == B_TRUE) {
252744961713Sgirish 		rx_msg_p->free = B_TRUE;
252844961713Sgirish 	}
252944961713Sgirish 	/*
253044961713Sgirish 	 * ERROR, FRAG and PKT_TYPE are only reported
253144961713Sgirish 	 * in the first entry.
253244961713Sgirish 	 * If a packet is not fragmented and no error bit is set, then
253344961713Sgirish 	 * L4 checksum is OK.
253444961713Sgirish 	 */
253544961713Sgirish 	is_valid = (nmp != NULL);
253653f3d8ecSyc 	if (first_entry) {
253753f3d8ecSyc 		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
253853f3d8ecSyc 		rdc_stats->ibytes += skip_len + l2_len < bsize ?
25397a2b8adfSyc 		    l2_len : bsize;
254053f3d8ecSyc 	} else {
254153f3d8ecSyc 		rdc_stats->ibytes += l2_len - bytes_read < bsize ?
254253f3d8ecSyc 		    l2_len - bytes_read : bsize;
254353f3d8ecSyc 	}
254453f3d8ecSyc 
254553f3d8ecSyc 	rcr_p->rcvd_pkt_bytes = bytes_read;
254653f3d8ecSyc 
254744961713Sgirish 	MUTEX_EXIT(&rx_rbr_p->lock);
254844961713Sgirish 	MUTEX_EXIT(&rcr_p->lock);
254944961713Sgirish 
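	/*
	 * If the data was bcopy'd and the block has been marked free,
	 * return it through nxge_freeb(), which either reposts the buffer
	 * to the RBR or releases it, depending on the remaining reference
	 * count.
	 */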
255044961713Sgirish 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
255144961713Sgirish 		atomic_inc_32(&rx_msg_p->ref_cnt);
255256d930aeSspeer 		atomic_inc_32(&nxge_mblks_pending);
255344961713Sgirish 		nxge_freeb(rx_msg_p);
255444961713Sgirish 	}
255544961713Sgirish 
255644961713Sgirish 	if (is_valid) {
2557a3c5bd6dSspeer 		nmp->b_cont = NULL;
255844961713Sgirish 		if (first_entry) {
255944961713Sgirish 			*mp = nmp;
256044961713Sgirish 			*mp_cont = NULL;
256153f3d8ecSyc 		} else {
256244961713Sgirish 			*mp_cont = nmp;
256353f3d8ecSyc 		}
256444961713Sgirish 	}
256544961713Sgirish 
256644961713Sgirish 	/*
256744961713Sgirish 	 * Update stats and hardware checksumming.
256844961713Sgirish 	 */
256944961713Sgirish 	if (is_valid && !multi) {
257044961713Sgirish 
257144961713Sgirish 		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
257244961713Sgirish 				pkt_type == RCR_PKT_IS_UDP) ?
257344961713Sgirish 					B_TRUE: B_FALSE);
257444961713Sgirish 
257544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
257614ea4bb7Ssd 			"is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
257744961713Sgirish 			is_valid, multi, is_tcp_udp, frag, error_type));
257844961713Sgirish 
257944961713Sgirish 		if (is_tcp_udp && !frag && !error_type) {
258044961713Sgirish 			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
258144961713Sgirish 				HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
258244961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
258344961713Sgirish 				"==> nxge_receive_packet: Full tcp/udp cksum "
258414ea4bb7Ssd 				"is_valid 0x%x multi 0x%llx pkt %d frag %d "
258544961713Sgirish 				"error %d",
258644961713Sgirish 				is_valid, multi, is_tcp_udp, frag, error_type));
258744961713Sgirish 		}
258844961713Sgirish 	}
258944961713Sgirish 
259044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
259144961713Sgirish 		"==> nxge_receive_packet: *mp 0x%016llx", *mp));
259244961713Sgirish 
259344961713Sgirish 	*multi_p = (multi == RCR_MULTI_MASK);
259444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
259544961713Sgirish 		"multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
259644961713Sgirish 		*multi_p, nmp, *mp, *mp_cont));
259744961713Sgirish }
259844961713Sgirish 
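/*
 * nxge_rx_err_evnts
 *
 * Decode the receive DMA control/status word for one channel: update the
 * per-channel error counters, post FMA ereports for hardware faults and,
 * for fatal conditions, attempt channel-level or port-level (IPP) error
 * recovery before returning.
 */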
259944961713Sgirish /*ARGSUSED*/
260044961713Sgirish static nxge_status_t
260144961713Sgirish nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp,
260244961713Sgirish 						rx_dma_ctl_stat_t cs)
260344961713Sgirish {
260444961713Sgirish 	p_nxge_rx_ring_stats_t	rdc_stats;
260544961713Sgirish 	npi_handle_t		handle;
260644961713Sgirish 	npi_status_t		rs;
260744961713Sgirish 	boolean_t		rxchan_fatal = B_FALSE;
260844961713Sgirish 	boolean_t		rxport_fatal = B_FALSE;
260944961713Sgirish 	uint8_t			channel;
261044961713Sgirish 	uint8_t			portn;
261144961713Sgirish 	nxge_status_t		status = NXGE_OK;
261244961713Sgirish 	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
261344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
261444961713Sgirish 
261544961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
261644961713Sgirish 	channel = ldvp->channel;
261744961713Sgirish 	portn = nxgep->mac.portnum;
261844961713Sgirish 	rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index];
261944961713Sgirish 
262044961713Sgirish 	if (cs.bits.hdw.rbr_tmout) {
262144961713Sgirish 		rdc_stats->rx_rbr_tmout++;
262244961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
262344961713Sgirish 					NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
262444961713Sgirish 		rxchan_fatal = B_TRUE;
262544961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
262644961713Sgirish 			"==> nxge_rx_err_evnts: rx_rbr_timeout"));
262744961713Sgirish 	}
262844961713Sgirish 	if (cs.bits.hdw.rsp_cnt_err) {
262944961713Sgirish 		rdc_stats->rsp_cnt_err++;
263044961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
263144961713Sgirish 					NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
263244961713Sgirish 		rxchan_fatal = B_TRUE;
263344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
263444961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
263544961713Sgirish 			"rsp_cnt_err", channel));
263644961713Sgirish 	}
263744961713Sgirish 	if (cs.bits.hdw.byte_en_bus) {
263844961713Sgirish 		rdc_stats->byte_en_bus++;
263944961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
264044961713Sgirish 					NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
264144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
264244961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
264344961713Sgirish 			"fatal error: byte_en_bus", channel));
264444961713Sgirish 		rxchan_fatal = B_TRUE;
264544961713Sgirish 	}
264644961713Sgirish 	if (cs.bits.hdw.rsp_dat_err) {
264744961713Sgirish 		rdc_stats->rsp_dat_err++;
264844961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
264944961713Sgirish 					NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
265044961713Sgirish 		rxchan_fatal = B_TRUE;
265144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
265244961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
265344961713Sgirish 			"fatal error: rsp_dat_err", channel));
265444961713Sgirish 	}
265544961713Sgirish 	if (cs.bits.hdw.rcr_ack_err) {
265644961713Sgirish 		rdc_stats->rcr_ack_err++;
265744961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
265844961713Sgirish 					NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
265944961713Sgirish 		rxchan_fatal = B_TRUE;
266044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
266144961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
266244961713Sgirish 			"fatal error: rcr_ack_err", channel));
266344961713Sgirish 	}
266444961713Sgirish 	if (cs.bits.hdw.dc_fifo_err) {
266544961713Sgirish 		rdc_stats->dc_fifo_err++;
266644961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
266744961713Sgirish 					NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
266844961713Sgirish 		/* Not a channel-fatal error, but the port requires recovery. */
266944961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
267044961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
267144961713Sgirish 			"dc_fifo_err", channel));
267244961713Sgirish 		rxport_fatal = B_TRUE;
267344961713Sgirish 	}
267444961713Sgirish 	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
267544961713Sgirish 		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
267644961713Sgirish 				&rdc_stats->errlog.pre_par,
267744961713Sgirish 				&rdc_stats->errlog.sha_par))
267844961713Sgirish 				!= NPI_SUCCESS) {
267944961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
268044961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
268144961713Sgirish 				"rcr_sha_par: get perr", channel));
268244961713Sgirish 			return (NXGE_ERROR | rs);
268344961713Sgirish 		}
268444961713Sgirish 		if (cs.bits.hdw.rcr_sha_par) {
268544961713Sgirish 			rdc_stats->rcr_sha_par++;
268644961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
268744961713Sgirish 					NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
268844961713Sgirish 			rxchan_fatal = B_TRUE;
268944961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
269044961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
269144961713Sgirish 				"fatal error: rcr_sha_par", channel));
269244961713Sgirish 		}
269344961713Sgirish 		if (cs.bits.hdw.rbr_pre_par) {
269444961713Sgirish 			rdc_stats->rbr_pre_par++;
269544961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
269644961713Sgirish 					NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
269744961713Sgirish 			rxchan_fatal = B_TRUE;
269844961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
269944961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
270044961713Sgirish 				"fatal error: rbr_pre_par", channel));
270144961713Sgirish 		}
270244961713Sgirish 	}
270344961713Sgirish 	if (cs.bits.hdw.port_drop_pkt) {
270444961713Sgirish 		rdc_stats->port_drop_pkt++;
270544961713Sgirish 		if (rdc_stats->port_drop_pkt < error_disp_cnt)
270644961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
270744961713Sgirish 				"==> nxge_rx_err_evnts (channel %d): "
270844961713Sgirish 				"port_drop_pkt", channel));
270944961713Sgirish 	}
271044961713Sgirish 	if (cs.bits.hdw.wred_drop) {
271144961713Sgirish 		rdc_stats->wred_drop++;
271244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
271344961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
271444961713Sgirish 			"wred_drop", channel));
271544961713Sgirish 	}
271644961713Sgirish 	if (cs.bits.hdw.rbr_pre_empty) {
271744961713Sgirish 		rdc_stats->rbr_pre_empty++;
271844961713Sgirish 		if (rdc_stats->rbr_pre_empty < error_disp_cnt)
271944961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
272044961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
272144961713Sgirish 				"rbr_pre_empty", channel));
272244961713Sgirish 	}
272344961713Sgirish 	if (cs.bits.hdw.rcr_shadow_full) {
272444961713Sgirish 		rdc_stats->rcr_shadow_full++;
272544961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
272644961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
272744961713Sgirish 			"rcr_shadow_full", channel));
272844961713Sgirish 	}
272944961713Sgirish 	if (cs.bits.hdw.config_err) {
273044961713Sgirish 		rdc_stats->config_err++;
273144961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
273244961713Sgirish 					NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
273344961713Sgirish 		rxchan_fatal = B_TRUE;
273444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
273544961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
273644961713Sgirish 			"config error", channel));
273744961713Sgirish 	}
273844961713Sgirish 	if (cs.bits.hdw.rcrincon) {
273944961713Sgirish 		rdc_stats->rcrincon++;
274044961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
274144961713Sgirish 					NXGE_FM_EREPORT_RDMC_RCRINCON);
274244961713Sgirish 		rxchan_fatal = B_TRUE;
274344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
274444961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
274544961713Sgirish 			"fatal error: rcrincon error", channel));
274644961713Sgirish 	}
274744961713Sgirish 	if (cs.bits.hdw.rcrfull) {
274844961713Sgirish 		rdc_stats->rcrfull++;
274944961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
275044961713Sgirish 					NXGE_FM_EREPORT_RDMC_RCRFULL);
275144961713Sgirish 		rxchan_fatal = B_TRUE;
275244961713Sgirish 		if (rdc_stats->rcrfull < error_disp_cnt)
275344961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
275444961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
275544961713Sgirish 				"fatal error: rcrfull error", channel));
275644961713Sgirish 	}
275744961713Sgirish 	if (cs.bits.hdw.rbr_empty) {
275844961713Sgirish 		rdc_stats->rbr_empty++;
275944961713Sgirish 		if (rdc_stats->rbr_empty < error_disp_cnt)
276044961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
276144961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
276244961713Sgirish 				"rbr empty error", channel));
276344961713Sgirish 	}
276444961713Sgirish 	if (cs.bits.hdw.rbrfull) {
276544961713Sgirish 		rdc_stats->rbrfull++;
276644961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
276744961713Sgirish 					NXGE_FM_EREPORT_RDMC_RBRFULL);
276844961713Sgirish 		rxchan_fatal = B_TRUE;
276944961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
277044961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
277144961713Sgirish 			"fatal error: rbr_full error", channel));
277244961713Sgirish 	}
277344961713Sgirish 	if (cs.bits.hdw.rbrlogpage) {
277444961713Sgirish 		rdc_stats->rbrlogpage++;
277544961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
277644961713Sgirish 					NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
277744961713Sgirish 		rxchan_fatal = B_TRUE;
277844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
277944961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
278044961713Sgirish 			"fatal error: rbr logical page error", channel));
278144961713Sgirish 	}
278244961713Sgirish 	if (cs.bits.hdw.cfiglogpage) {
278344961713Sgirish 		rdc_stats->cfiglogpage++;
278444961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
278544961713Sgirish 					NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
278644961713Sgirish 		rxchan_fatal = B_TRUE;
278744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
278844961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
278944961713Sgirish 			"fatal error: cfig logical page error", channel));
279044961713Sgirish 	}
279144961713Sgirish 
279244961713Sgirish 	if (rxport_fatal)  {
279344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
279444961713Sgirish 				" nxge_rx_err_evnts: "
279544961713Sgirish 				" fatal error on Port #%d\n",
279644961713Sgirish 				portn));
279744961713Sgirish 		status = nxge_ipp_fatal_err_recover(nxgep);
279844961713Sgirish 		if (status == NXGE_OK) {
279944961713Sgirish 			FM_SERVICE_RESTORED(nxgep);
280044961713Sgirish 		}
280144961713Sgirish 	}
280244961713Sgirish 
280344961713Sgirish 	if (rxchan_fatal) {
280444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
280544961713Sgirish 				" nxge_rx_err_evnts: "
280644961713Sgirish 				" fatal error on Channel #%d\n",
280744961713Sgirish 				channel));
280844961713Sgirish 		status = nxge_rxdma_fatal_err_recover(nxgep, channel);
280944961713Sgirish 		if (status == NXGE_OK) {
281044961713Sgirish 			FM_SERVICE_RESTORED(nxgep);
281144961713Sgirish 		}
281244961713Sgirish 	}
281344961713Sgirish 
281444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
281544961713Sgirish 
281644961713Sgirish 	return (status);
281744961713Sgirish }
281844961713Sgirish 
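/*
 * nxge_map_rxdma
 *
 * Allocate the per-port RBR/RCR/mailbox bookkeeping arrays and map the
 * buffer blocks, descriptor rings and mailbox of every receive DMA
 * channel in the buffer pool.  On failure, the channels mapped so far
 * are unmapped and the bookkeeping arrays are freed.
 */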
281944961713Sgirish static nxge_status_t
282044961713Sgirish nxge_map_rxdma(p_nxge_t nxgep)
282144961713Sgirish {
282244961713Sgirish 	int			i, ndmas;
282344961713Sgirish 	uint16_t		channel;
282444961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
282544961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
282644961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
282744961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
282844961713Sgirish 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
282944961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
283044961713Sgirish 	p_nxge_dma_pool_t	dma_buf_poolp;
283144961713Sgirish 	p_nxge_dma_pool_t	dma_cntl_poolp;
283244961713Sgirish 	p_nxge_dma_common_t	*dma_buf_p;
283344961713Sgirish 	p_nxge_dma_common_t	*dma_cntl_p;
283444961713Sgirish 	uint32_t		*num_chunks;
283544961713Sgirish 	nxge_status_t		status = NXGE_OK;
283644961713Sgirish #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
283744961713Sgirish 	p_nxge_dma_common_t	t_dma_buf_p;
283844961713Sgirish 	p_nxge_dma_common_t	t_dma_cntl_p;
283944961713Sgirish #endif
284044961713Sgirish 
284144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
284244961713Sgirish 
284344961713Sgirish 	dma_buf_poolp = nxgep->rx_buf_pool_p;
284444961713Sgirish 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
284544961713Sgirish 
284644961713Sgirish 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
284744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
284844961713Sgirish 			"<== nxge_map_rxdma: buf not allocated"));
284944961713Sgirish 		return (NXGE_ERROR);
285044961713Sgirish 	}
285144961713Sgirish 
285244961713Sgirish 	ndmas = dma_buf_poolp->ndmas;
285344961713Sgirish 	if (!ndmas) {
285444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
285544961713Sgirish 			"<== nxge_map_rxdma: no dma allocated"));
285644961713Sgirish 		return (NXGE_ERROR);
285744961713Sgirish 	}
285844961713Sgirish 
285944961713Sgirish 	num_chunks = dma_buf_poolp->num_chunks;
286044961713Sgirish 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
286144961713Sgirish 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
286244961713Sgirish 
286314ea4bb7Ssd 	rx_rbr_rings = (p_rx_rbr_rings_t)
286414ea4bb7Ssd 		KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
286514ea4bb7Ssd 	rbr_rings = (p_rx_rbr_ring_t *)
286614ea4bb7Ssd 		KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
286744961713Sgirish 	rx_rcr_rings = (p_rx_rcr_rings_t)
286814ea4bb7Ssd 		KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
286914ea4bb7Ssd 	rcr_rings = (p_rx_rcr_ring_t *)
287014ea4bb7Ssd 		KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
287144961713Sgirish 	rx_mbox_areas_p = (p_rx_mbox_areas_t)
287214ea4bb7Ssd 		KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
287314ea4bb7Ssd 	rx_mbox_p = (p_rx_mbox_t *)
287414ea4bb7Ssd 		KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
287514ea4bb7Ssd 
287614ea4bb7Ssd 	/*
287714ea4bb7Ssd 	 * Timeout should be set based on the system clock divider.
287814ea4bb7Ssd 	 * The following timeout value of 1 assumes that the
287914ea4bb7Ssd 	 * granularity (1000) is 3 microseconds running at 300MHz.
288014ea4bb7Ssd 	 */
288114ea4bb7Ssd 
288214ea4bb7Ssd 	nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
288314ea4bb7Ssd 	nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
288444961713Sgirish 
288544961713Sgirish 	/*
288644961713Sgirish 	 * Map descriptors from the buffer pools for each DMA channel.
288744961713Sgirish 	 */
288844961713Sgirish 	for (i = 0; i < ndmas; i++) {
288944961713Sgirish 		/*
289044961713Sgirish 		 * Set up and prepare buffer blocks, descriptors
289144961713Sgirish 		 * and mailbox.
289244961713Sgirish 		 */
289344961713Sgirish 		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
289444961713Sgirish 		status = nxge_map_rxdma_channel(nxgep, channel,
289544961713Sgirish 				(p_nxge_dma_common_t *)&dma_buf_p[i],
289644961713Sgirish 				(p_rx_rbr_ring_t *)&rbr_rings[i],
289744961713Sgirish 				num_chunks[i],
289844961713Sgirish 				(p_nxge_dma_common_t *)&dma_cntl_p[i],
289944961713Sgirish 				(p_rx_rcr_ring_t *)&rcr_rings[i],
290044961713Sgirish 				(p_rx_mbox_t *)&rx_mbox_p[i]);
290144961713Sgirish 		if (status != NXGE_OK) {
290244961713Sgirish 			goto nxge_map_rxdma_fail1;
290344961713Sgirish 		}
290444961713Sgirish 		rbr_rings[i]->index = (uint16_t)i;
290544961713Sgirish 		rcr_rings[i]->index = (uint16_t)i;
290644961713Sgirish 		rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i];
290744961713Sgirish 
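		/*
		 * For the on-chip N2/NIU, record the original contiguous
		 * buffer and control DMA areas so that they can later be
		 * programmed into the hypervisor logical pages for this
		 * channel (NIU_LP_WORKAROUND).
		 */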
290844961713Sgirish #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
290944961713Sgirish 		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
291044961713Sgirish 			rbr_rings[i]->hv_set = B_FALSE;
291144961713Sgirish 			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
291244961713Sgirish 			t_dma_cntl_p =
291344961713Sgirish 				(p_nxge_dma_common_t)dma_cntl_p[i];
291444961713Sgirish 
291544961713Sgirish 			rbr_rings[i]->hv_rx_buf_base_ioaddr_pp =
291644961713Sgirish 				(uint64_t)t_dma_buf_p->orig_ioaddr_pp;
291744961713Sgirish 			rbr_rings[i]->hv_rx_buf_ioaddr_size =
291844961713Sgirish 				(uint64_t)t_dma_buf_p->orig_alength;
291944961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
292044961713Sgirish 				"==> nxge_map_rxdma_channel: "
292144961713Sgirish 				"channel %d "
292244961713Sgirish 				"data buf base io $%p ($%p) "
292344961713Sgirish 				"size 0x%llx (%d 0x%x)",
292444961713Sgirish 				channel,
292544961713Sgirish 				rbr_rings[i]->hv_rx_buf_base_ioaddr_pp,
292644961713Sgirish 				t_dma_buf_p->ioaddr_pp,
292744961713Sgirish 				rbr_rings[i]->hv_rx_buf_ioaddr_size,
292844961713Sgirish 				t_dma_buf_p->orig_alength,
292944961713Sgirish 				t_dma_buf_p->orig_alength));
293044961713Sgirish 
293144961713Sgirish 			rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp =
293244961713Sgirish 				(uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
293344961713Sgirish 			rbr_rings[i]->hv_rx_cntl_ioaddr_size =
293444961713Sgirish 				(uint64_t)t_dma_cntl_p->orig_alength;
293544961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
293644961713Sgirish 				"==> nxge_map_rxdma_channel: "
293744961713Sgirish 				"channel %d "
293844961713Sgirish 				"cntl base io $%p ($%p) "
293944961713Sgirish 				"size 0x%llx (%d 0x%x)",
294044961713Sgirish 				channel,
294144961713Sgirish 				rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp,
294244961713Sgirish 				t_dma_cntl_p->ioaddr_pp,
294344961713Sgirish 				rbr_rings[i]->hv_rx_cntl_ioaddr_size,
294444961713Sgirish 				t_dma_cntl_p->orig_alength,
294544961713Sgirish 				t_dma_cntl_p->orig_alength));
294644961713Sgirish 		}
294744961713Sgirish 
294844961713Sgirish #endif	/* sun4v and NIU_LP_WORKAROUND */
294944961713Sgirish 	}
295044961713Sgirish 
295144961713Sgirish 	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
295244961713Sgirish 	rx_rbr_rings->rbr_rings = rbr_rings;
295344961713Sgirish 	nxgep->rx_rbr_rings = rx_rbr_rings;
295444961713Sgirish 	rx_rcr_rings->rcr_rings = rcr_rings;
295544961713Sgirish 	nxgep->rx_rcr_rings = rx_rcr_rings;
295644961713Sgirish 
295744961713Sgirish 	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
295844961713Sgirish 	nxgep->rx_mbox_areas_p = rx_mbox_areas_p;
295944961713Sgirish 
296044961713Sgirish 	goto nxge_map_rxdma_exit;
296144961713Sgirish 
296244961713Sgirish nxge_map_rxdma_fail1:
296344961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
296444961713Sgirish 		"==> nxge_map_rxdma: unmap rbr,rcr "
296544961713Sgirish 		"(status 0x%x channel %d i %d)",
296644961713Sgirish 		status, channel, i));
296756d930aeSspeer 	i--;
296844961713Sgirish 	for (; i >= 0; i--) {
296944961713Sgirish 		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
297044961713Sgirish 		nxge_unmap_rxdma_channel(nxgep, channel,
297144961713Sgirish 			rbr_rings[i],
297244961713Sgirish 			rcr_rings[i],
297344961713Sgirish 			rx_mbox_p[i]);
297444961713Sgirish 	}
297544961713Sgirish 
297644961713Sgirish 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
297744961713Sgirish 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
297844961713Sgirish 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
297944961713Sgirish 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
298044961713Sgirish 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
298144961713Sgirish 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
298244961713Sgirish 
298344961713Sgirish nxge_map_rxdma_exit:
298444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
298544961713Sgirish 		"<== nxge_map_rxdma: "
298644961713Sgirish 		"(status 0x%x channel %d)",
298744961713Sgirish 		status, channel));
298844961713Sgirish 
298944961713Sgirish 	return (status);
299044961713Sgirish }
299144961713Sgirish 
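/*
 * nxge_unmap_rxdma
 *
 * Undo nxge_map_rxdma(): unmap each channel's buffer ring, completion
 * ring and mailbox, then free the per-port bookkeeping arrays.
 */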
299244961713Sgirish static void
299344961713Sgirish nxge_unmap_rxdma(p_nxge_t nxgep)
299444961713Sgirish {
299544961713Sgirish 	int			i, ndmas;
299644961713Sgirish 	uint16_t		channel;
299744961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
299844961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
299944961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
300044961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
300144961713Sgirish 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
300244961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
300344961713Sgirish 	p_nxge_dma_pool_t	dma_buf_poolp;
300444961713Sgirish 	p_nxge_dma_pool_t	dma_cntl_poolp;
300544961713Sgirish 	p_nxge_dma_common_t	*dma_buf_p;
300644961713Sgirish 
300744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma"));
300844961713Sgirish 
300944961713Sgirish 	dma_buf_poolp = nxgep->rx_buf_pool_p;
301044961713Sgirish 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
301144961713Sgirish 
301244961713Sgirish 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
301344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
301444961713Sgirish 			"<== nxge_unmap_rxdma: NULL buf pointers"));
301544961713Sgirish 		return;
301644961713Sgirish 	}
301744961713Sgirish 
301844961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
301944961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
302044961713Sgirish 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
302144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
302244961713Sgirish 			"<== nxge_unmap_rxdma: NULL ring pointers"));
302344961713Sgirish 		return;
302444961713Sgirish 	}
302544961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
302644961713Sgirish 	if (!ndmas) {
302744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
302844961713Sgirish 			"<== nxge_unmap_rxdma: no channel"));
302944961713Sgirish 		return;
303044961713Sgirish 	}
303144961713Sgirish 
303244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
303344961713Sgirish 		"==> nxge_unmap_rxdma (ndmas %d)", ndmas));
303444961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
303544961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
303644961713Sgirish 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
303744961713Sgirish 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
303844961713Sgirish 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
303944961713Sgirish 
304044961713Sgirish 	for (i = 0; i < ndmas; i++) {
304144961713Sgirish 		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
304244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
304344961713Sgirish 			"==> nxge_unmap_rxdma (ndmas %d) channel %d",
304444961713Sgirish 				ndmas, channel));
304544961713Sgirish 		(void) nxge_unmap_rxdma_channel(nxgep, channel,
304644961713Sgirish 				(p_rx_rbr_ring_t)rbr_rings[i],
304744961713Sgirish 				(p_rx_rcr_ring_t)rcr_rings[i],
304844961713Sgirish 				(p_rx_mbox_t)rx_mbox_p[i]);
304944961713Sgirish 	}
305044961713Sgirish 
305144961713Sgirish 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
305244961713Sgirish 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
305344961713Sgirish 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
305444961713Sgirish 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
305544961713Sgirish 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
305644961713Sgirish 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
305744961713Sgirish 
305844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
305944961713Sgirish 		"<== nxge_unmap_rxdma"));
306044961713Sgirish }
306144961713Sgirish 
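/*
 * nxge_map_rxdma_channel
 *
 * Map a single receive DMA channel: first the receive buffer blocks
 * (the RBR message ring), then the descriptor/completion rings and the
 * mailbox.  If the second step fails, the buffer ring mapped in the
 * first step is unwound before an error is returned.
 */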
306244961713Sgirish nxge_status_t
306344961713Sgirish nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
306444961713Sgirish     p_nxge_dma_common_t *dma_buf_p,  p_rx_rbr_ring_t *rbr_p,
306544961713Sgirish     uint32_t num_chunks,
306644961713Sgirish     p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
306744961713Sgirish     p_rx_mbox_t *rx_mbox_p)
306844961713Sgirish {
306944961713Sgirish 	int	status = NXGE_OK;
307044961713Sgirish 
307144961713Sgirish 	/*
307244961713Sgirish 	 * Set up and prepare buffer blocks, descriptors
307344961713Sgirish 	 * and mailbox.
307444961713Sgirish 	 */
307544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
307644961713Sgirish 		"==> nxge_map_rxdma_channel (channel %d)", channel));
307744961713Sgirish 	/*
307844961713Sgirish 	 * Receive buffer blocks
307944961713Sgirish 	 */
308044961713Sgirish 	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
308144961713Sgirish 			dma_buf_p, rbr_p, num_chunks);
308244961713Sgirish 	if (status != NXGE_OK) {
308344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
308444961713Sgirish 			"==> nxge_map_rxdma_channel (channel %d): "
308544961713Sgirish 			"map buffer failed 0x%x", channel, status));
308644961713Sgirish 		goto nxge_map_rxdma_channel_exit;
308744961713Sgirish 	}
308844961713Sgirish 
308944961713Sgirish 	/*
309044961713Sgirish 	 * Receive block ring, completion ring and mailbox.
309144961713Sgirish 	 */
309244961713Sgirish 	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
309344961713Sgirish 			dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
309444961713Sgirish 	if (status != NXGE_OK) {
309544961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
309644961713Sgirish 			"==> nxge_map_rxdma_channel (channel %d): "
309744961713Sgirish 			"map config failed 0x%x", channel, status));
309844961713Sgirish 		goto nxge_map_rxdma_channel_fail2;
309944961713Sgirish 	}
310044961713Sgirish 
310144961713Sgirish 	goto nxge_map_rxdma_channel_exit;
310244961713Sgirish 
310344961713Sgirish nxge_map_rxdma_channel_fail3:
310444961713Sgirish 	/* Free rbr, rcr */
310544961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
310644961713Sgirish 		"==> nxge_map_rxdma_channel: free rbr/rcr "
310744961713Sgirish 		"(status 0x%x channel %d)",
310844961713Sgirish 		status, channel));
310944961713Sgirish 	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
311044961713Sgirish 		*rcr_p, *rx_mbox_p);
311144961713Sgirish 
311244961713Sgirish nxge_map_rxdma_channel_fail2:
311344961713Sgirish 	/* Free buffer blocks */
311444961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
311544961713Sgirish 		"==> nxge_map_rxdma_channel: free rx buffers "
311644961713Sgirish 		"(nxgep 0x%x status 0x%x channel %d)",
311744961713Sgirish 		nxgep, status, channel));
311844961713Sgirish 	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
311944961713Sgirish 
312056d930aeSspeer 	status = NXGE_ERROR;
312156d930aeSspeer 
312244961713Sgirish nxge_map_rxdma_channel_exit:
312344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
312444961713Sgirish 		"<== nxge_map_rxdma_channel: "
312544961713Sgirish 		"(nxgep 0x%x status 0x%x channel %d)",
312644961713Sgirish 		nxgep, status, channel));
312744961713Sgirish 
312844961713Sgirish 	return (status);
312944961713Sgirish }
313044961713Sgirish 
313144961713Sgirish /*ARGSUSED*/
313244961713Sgirish static void
313344961713Sgirish nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
313444961713Sgirish     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
313544961713Sgirish {
313644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
313744961713Sgirish 		"==> nxge_unmap_rxdma_channel (channel %d)", channel));
313844961713Sgirish 
313944961713Sgirish 	/*
314044961713Sgirish 	 * unmap receive block ring, completion ring and mailbox.
314144961713Sgirish 	 */
314244961713Sgirish 	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
314344961713Sgirish 			rcr_p, rx_mbox_p);
314444961713Sgirish 
314544961713Sgirish 	/* unmap buffer blocks */
314644961713Sgirish 	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
314744961713Sgirish 
314844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
314944961713Sgirish }
315044961713Sgirish 
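/*
 * nxge_map_rxdma_channel_cfg_ring
 *
 * Carve the RBR descriptor ring, the RCR completion ring and the mailbox
 * out of the channel's control DMA area, enter the buffer block addresses
 * into the RBR, and precompute the configuration register values (buffer
 * sizes, bcopy thresholds, interrupt threshold and timeout).
 */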
315144961713Sgirish /*ARGSUSED*/
315244961713Sgirish static nxge_status_t
315344961713Sgirish nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
315444961713Sgirish     p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
315544961713Sgirish     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
315644961713Sgirish {
315744961713Sgirish 	p_rx_rbr_ring_t 	rbrp;
315844961713Sgirish 	p_rx_rcr_ring_t 	rcrp;
315944961713Sgirish 	p_rx_mbox_t 		mboxp;
316044961713Sgirish 	p_nxge_dma_common_t 	cntl_dmap;
316144961713Sgirish 	p_nxge_dma_common_t 	dmap;
316244961713Sgirish 	p_rx_msg_t 		*rx_msg_ring;
316344961713Sgirish 	p_rx_msg_t 		rx_msg_p;
316444961713Sgirish 	p_rbr_cfig_a_t		rcfga_p;
316544961713Sgirish 	p_rbr_cfig_b_t		rcfgb_p;
316644961713Sgirish 	p_rcrcfig_a_t		cfga_p;
316744961713Sgirish 	p_rcrcfig_b_t		cfgb_p;
316844961713Sgirish 	p_rxdma_cfig1_t		cfig1_p;
316944961713Sgirish 	p_rxdma_cfig2_t		cfig2_p;
317044961713Sgirish 	p_rbr_kick_t		kick_p;
317144961713Sgirish 	uint32_t		dmaaddrp;
317244961713Sgirish 	uint32_t		*rbr_vaddrp;
317344961713Sgirish 	uint32_t		bkaddr;
317444961713Sgirish 	nxge_status_t		status = NXGE_OK;
317544961713Sgirish 	int			i;
317644961713Sgirish 	uint32_t 		nxge_port_rcr_size;
317744961713Sgirish 
317844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
317944961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring"));
318044961713Sgirish 
318144961713Sgirish 	cntl_dmap = *dma_cntl_p;
318244961713Sgirish 
318344961713Sgirish 	/* Map in the receive block ring */
318444961713Sgirish 	rbrp = *rbr_p;
318544961713Sgirish 	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
318644961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
318744961713Sgirish 	/*
318844961713Sgirish 	 * Zero out buffer block ring descriptors.
318944961713Sgirish 	 */
319044961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
319144961713Sgirish 
319244961713Sgirish 	rcfga_p = &(rbrp->rbr_cfga);
319344961713Sgirish 	rcfgb_p = &(rbrp->rbr_cfgb);
319444961713Sgirish 	kick_p = &(rbrp->rbr_kick);
319544961713Sgirish 	rcfga_p->value = 0;
319644961713Sgirish 	rcfgb_p->value = 0;
319744961713Sgirish 	kick_p->value = 0;
319844961713Sgirish 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
319944961713Sgirish 	rcfga_p->value = (rbrp->rbr_addr &
320044961713Sgirish 				(RBR_CFIG_A_STDADDR_MASK |
320144961713Sgirish 				RBR_CFIG_A_STDADDR_BASE_MASK));
320244961713Sgirish 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
320344961713Sgirish 
320444961713Sgirish 	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
320544961713Sgirish 	rcfgb_p->bits.ldw.vld0 = 1;
320644961713Sgirish 	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
320744961713Sgirish 	rcfgb_p->bits.ldw.vld1 = 1;
320844961713Sgirish 	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
320944961713Sgirish 	rcfgb_p->bits.ldw.vld2 = 1;
321044961713Sgirish 	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
321144961713Sgirish 
321244961713Sgirish 	/*
321344961713Sgirish 	 * For each buffer block, enter receive block address to the ring.
321444961713Sgirish 	 */
321544961713Sgirish 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
321644961713Sgirish 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
321744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
321844961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring: channel %d "
321944961713Sgirish 		"rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
322044961713Sgirish 
322144961713Sgirish 	rx_msg_ring = rbrp->rx_msg_ring;
322244961713Sgirish 	for (i = 0; i < rbrp->tnblocks; i++) {
322344961713Sgirish 		rx_msg_p = rx_msg_ring[i];
322444961713Sgirish 		rx_msg_p->nxgep = nxgep;
322544961713Sgirish 		rx_msg_p->rx_rbr_p = rbrp;
322644961713Sgirish 		bkaddr = (uint32_t)
322744961713Sgirish 			((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
322844961713Sgirish 				>> RBR_BKADDR_SHIFT));
322944961713Sgirish 		rx_msg_p->free = B_FALSE;
323044961713Sgirish 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
323144961713Sgirish 
323244961713Sgirish 		*rbr_vaddrp++ = bkaddr;
323344961713Sgirish 	}
323444961713Sgirish 
323544961713Sgirish 	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
323644961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
323744961713Sgirish 
323844961713Sgirish 	rbrp->rbr_rd_index = 0;
323944961713Sgirish 
324044961713Sgirish 	rbrp->rbr_consumed = 0;
324144961713Sgirish 	rbrp->rbr_use_bcopy = B_TRUE;
324244961713Sgirish 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
324344961713Sgirish 	/*
324444961713Sgirish 	 * Do bcopy on packets greater than bcopy size once
324544961713Sgirish 	 * the lo threshold is reached.
324644961713Sgirish 	 * This lo threshold should be less than the hi threshold.
324744961713Sgirish 	 *
324844961713Sgirish 	 * Do bcopy on every packet once the hi threshold is reached.
324944961713Sgirish 	 */
325044961713Sgirish 	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
325144961713Sgirish 		/* default it to use hi */
325244961713Sgirish 		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
325344961713Sgirish 	}
325444961713Sgirish 
325544961713Sgirish 	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
325644961713Sgirish 		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
325744961713Sgirish 	}
325844961713Sgirish 	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
325944961713Sgirish 
326044961713Sgirish 	switch (nxge_rx_threshold_hi) {
326144961713Sgirish 	default:
326244961713Sgirish 	case	NXGE_RX_COPY_NONE:
326344961713Sgirish 		/* Do not do bcopy at all */
326444961713Sgirish 		rbrp->rbr_use_bcopy = B_FALSE;
326544961713Sgirish 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
326644961713Sgirish 		break;
326744961713Sgirish 
326844961713Sgirish 	case NXGE_RX_COPY_1:
326944961713Sgirish 	case NXGE_RX_COPY_2:
327044961713Sgirish 	case NXGE_RX_COPY_3:
327144961713Sgirish 	case NXGE_RX_COPY_4:
327244961713Sgirish 	case NXGE_RX_COPY_5:
327344961713Sgirish 	case NXGE_RX_COPY_6:
327444961713Sgirish 	case NXGE_RX_COPY_7:
327544961713Sgirish 		rbrp->rbr_threshold_hi =
327644961713Sgirish 			rbrp->rbb_max *
327744961713Sgirish 			(nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
327844961713Sgirish 		break;
327944961713Sgirish 
328044961713Sgirish 	case NXGE_RX_COPY_ALL:
328144961713Sgirish 		rbrp->rbr_threshold_hi = 0;
328244961713Sgirish 		break;
328344961713Sgirish 	}
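	/*
	 * Example (assuming NXGE_RX_BCOPY_SCALE is 8 and NXGE_RX_COPY_6
	 * has the numeric value 6): with rbb_max = 4096 buffer blocks and
	 * nxge_rx_threshold_hi = NXGE_RX_COPY_6, rbr_threshold_hi becomes
	 * 4096 * 6 / 8 = 3072, so every packet is bcopy'd once roughly
	 * three quarters of the ring is consumed.
	 */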
328444961713Sgirish 
328544961713Sgirish 	switch (nxge_rx_threshold_lo) {
328644961713Sgirish 	default:
328744961713Sgirish 	case	NXGE_RX_COPY_NONE:
328844961713Sgirish 		/* Do not do bcopy at all */
328944961713Sgirish 		if (rbrp->rbr_use_bcopy) {
329044961713Sgirish 			rbrp->rbr_use_bcopy = B_FALSE;
329144961713Sgirish 		}
329244961713Sgirish 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
329344961713Sgirish 		break;
329444961713Sgirish 
329544961713Sgirish 	case NXGE_RX_COPY_1:
329644961713Sgirish 	case NXGE_RX_COPY_2:
329744961713Sgirish 	case NXGE_RX_COPY_3:
329844961713Sgirish 	case NXGE_RX_COPY_4:
329944961713Sgirish 	case NXGE_RX_COPY_5:
330044961713Sgirish 	case NXGE_RX_COPY_6:
330144961713Sgirish 	case NXGE_RX_COPY_7:
330244961713Sgirish 		rbrp->rbr_threshold_lo =
330344961713Sgirish 			rbrp->rbb_max *
330444961713Sgirish 			(nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
330544961713Sgirish 		break;
330644961713Sgirish 
330744961713Sgirish 	case NXGE_RX_COPY_ALL:
330844961713Sgirish 		rbrp->rbr_threshold_lo = 0;
330944961713Sgirish 		break;
331044961713Sgirish 	}
331144961713Sgirish 
331244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
331344961713Sgirish 		"nxge_map_rxdma_channel_cfg_ring: channel %d "
331444961713Sgirish 		"rbb_max %d "
331544961713Sgirish 		"rbrp->rbr_bufsize_type %d "
331644961713Sgirish 		"rbb_threshold_hi %d "
331744961713Sgirish 		"rbb_threshold_lo %d",
331844961713Sgirish 		dma_channel,
331944961713Sgirish 		rbrp->rbb_max,
332044961713Sgirish 		rbrp->rbr_bufsize_type,
332144961713Sgirish 		rbrp->rbr_threshold_hi,
332244961713Sgirish 		rbrp->rbr_threshold_lo));
332344961713Sgirish 
332444961713Sgirish 	rbrp->page_valid.value = 0;
332544961713Sgirish 	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
332644961713Sgirish 	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
332744961713Sgirish 	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
332844961713Sgirish 	rbrp->page_hdl.value = 0;
332944961713Sgirish 
333044961713Sgirish 	rbrp->page_valid.bits.ldw.page0 = 1;
333144961713Sgirish 	rbrp->page_valid.bits.ldw.page1 = 1;
333244961713Sgirish 
333344961713Sgirish 	/* Map in the receive completion ring */
333444961713Sgirish 	rcrp = (p_rx_rcr_ring_t)
333514ea4bb7Ssd 		KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
333644961713Sgirish 	rcrp->rdc = dma_channel;
333744961713Sgirish 
333844961713Sgirish 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
333944961713Sgirish 	rcrp->comp_size = nxge_port_rcr_size;
334044961713Sgirish 	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
334144961713Sgirish 
334244961713Sgirish 	rcrp->max_receive_pkts = nxge_max_rx_pkts;
334344961713Sgirish 
334444961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
334544961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
334644961713Sgirish 			sizeof (rcr_entry_t));
334744961713Sgirish 	rcrp->comp_rd_index = 0;
334844961713Sgirish 	rcrp->comp_wt_index = 0;
334944961713Sgirish 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
335044961713Sgirish 		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
335144961713Sgirish 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3352adfcba55Sjoycey #if defined(__i386)
3353adfcba55Sjoycey 		(p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3354adfcba55Sjoycey #else
335544961713Sgirish 		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3356adfcba55Sjoycey #endif
335744961713Sgirish 
335844961713Sgirish 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
335944961713Sgirish 			(nxge_port_rcr_size - 1);
336044961713Sgirish 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
336144961713Sgirish 			(nxge_port_rcr_size - 1);
336244961713Sgirish 
336344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
336444961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring: "
336544961713Sgirish 		"channel %d "
336644961713Sgirish 		"rbr_vaddrp $%p "
336744961713Sgirish 		"rcr_desc_rd_head_p $%p "
336844961713Sgirish 		"rcr_desc_rd_head_pp $%p "
336944961713Sgirish 		"rcr_desc_rd_last_p $%p "
337044961713Sgirish 		"rcr_desc_rd_last_pp $%p ",
337144961713Sgirish 		dma_channel,
337244961713Sgirish 		rbr_vaddrp,
337344961713Sgirish 		rcrp->rcr_desc_rd_head_p,
337444961713Sgirish 		rcrp->rcr_desc_rd_head_pp,
337544961713Sgirish 		rcrp->rcr_desc_last_p,
337644961713Sgirish 		rcrp->rcr_desc_last_pp));
337744961713Sgirish 
337844961713Sgirish 	/*
337944961713Sgirish 	 * Zero out buffer block ring descriptors.
338044961713Sgirish 	 */
338144961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
338214ea4bb7Ssd 	rcrp->intr_timeout = nxgep->intr_timeout;
338314ea4bb7Ssd 	rcrp->intr_threshold = nxgep->intr_threshold;
338444961713Sgirish 	rcrp->full_hdr_flag = B_FALSE;
338544961713Sgirish 	rcrp->sw_priv_hdr_len = 0;
338644961713Sgirish 
338744961713Sgirish 	cfga_p = &(rcrp->rcr_cfga);
338844961713Sgirish 	cfgb_p = &(rcrp->rcr_cfgb);
338944961713Sgirish 	cfga_p->value = 0;
339044961713Sgirish 	cfgb_p->value = 0;
339144961713Sgirish 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
339244961713Sgirish 	cfga_p->value = (rcrp->rcr_addr &
339344961713Sgirish 			    (RCRCFIG_A_STADDR_MASK |
339444961713Sgirish 			    RCRCFIG_A_STADDR_BASE_MASK));
339544961713Sgirish 
339644961713Sgirish 	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
339744961713Sgirish 				RCRCFIG_A_LEN_SHIF);
339844961713Sgirish 
339944961713Sgirish 	/*
340044961713Sgirish 	 * Timeout should be set based on the system clock divider.
340144961713Sgirish 	 * The following timeout value of 1 assumes that the
340244961713Sgirish 	 * granularity (1000) is 3 microseconds running at 300MHz.
340344961713Sgirish 	 */
340414ea4bb7Ssd 	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
340514ea4bb7Ssd 	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
340644961713Sgirish 	cfgb_p->bits.ldw.entout = 1;
340744961713Sgirish 
340844961713Sgirish 	/* Map in the mailbox */
340944961713Sgirish 	mboxp = (p_rx_mbox_t)
341044961713Sgirish 			KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
341144961713Sgirish 	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
341244961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
341344961713Sgirish 	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
341444961713Sgirish 	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
341544961713Sgirish 	cfig1_p->value = cfig2_p->value = 0;
341644961713Sgirish 
341744961713Sgirish 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
341844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
341944961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring: "
342044961713Sgirish 		"channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
342144961713Sgirish 		dma_channel, cfig1_p->value, cfig2_p->value,
342244961713Sgirish 		mboxp->mbox_addr));
342344961713Sgirish 
342444961713Sgirish 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
342544961713Sgirish 			& 0xfff);
342644961713Sgirish 	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
342744961713Sgirish 
342844961713Sgirish 
342944961713Sgirish 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
343044961713Sgirish 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
343144961713Sgirish 				RXDMA_CFIG2_MBADDR_L_MASK);
343244961713Sgirish 
343344961713Sgirish 	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
343444961713Sgirish 
343544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
343644961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring: "
343744961713Sgirish 		"channel %d dmaaddrp $%p "
343844961713Sgirish 		"cfg1 0x%016llx cfig2 0x%016llx",
343944961713Sgirish 		dma_channel, dmaaddrp,
344044961713Sgirish 		cfig1_p->value, cfig2_p->value));
344144961713Sgirish 
344244961713Sgirish 	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
344344961713Sgirish 	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
344444961713Sgirish 
344544961713Sgirish 	rbrp->rx_rcr_p = rcrp;
344644961713Sgirish 	rcrp->rx_rbr_p = rbrp;
344744961713Sgirish 	*rcr_p = rcrp;
344844961713Sgirish 	*rx_mbox_p = mboxp;
344944961713Sgirish 
345044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
345144961713Sgirish 		"<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
345244961713Sgirish 
345344961713Sgirish 	return (status);
345444961713Sgirish }
345544961713Sgirish 
345644961713Sgirish /*ARGSUSED*/
345744961713Sgirish static void
345844961713Sgirish nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
345944961713Sgirish     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
346044961713Sgirish {
346144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
346244961713Sgirish 		"==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
346344961713Sgirish 		rcr_p->rdc));
346444961713Sgirish 
346544961713Sgirish 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
346644961713Sgirish 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
346744961713Sgirish 
346844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
346944961713Sgirish 		"<== nxge_unmap_rxdma_channel_cfg_ring"));
347044961713Sgirish }
347144961713Sgirish 
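/*
 * nxge_map_rxdma_channel_buf_ring
 *
 * Build the receive buffer ring for one channel: allocate an rx_msg_t
 * (with its mblk, via nxge_allocb()) for every block of every DMA buffer
 * chunk, record the chunk geometry in the ring_info table (initialized
 * further by nxge_rxbuf_index_info_init()), and choose the three RBR
 * packet buffer sizes (256B/1KB/2KB, or up to 8KB with jumbo frames).
 */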
347244961713Sgirish static nxge_status_t
347344961713Sgirish nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
347444961713Sgirish     p_nxge_dma_common_t *dma_buf_p,
347544961713Sgirish     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
347644961713Sgirish {
347744961713Sgirish 	p_rx_rbr_ring_t 	rbrp;
347844961713Sgirish 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
347944961713Sgirish 	p_rx_msg_t 		*rx_msg_ring;
348044961713Sgirish 	p_rx_msg_t 		rx_msg_p;
348144961713Sgirish 	p_mblk_t 		mblk_p;
348244961713Sgirish 
348344961713Sgirish 	rxring_info_t *ring_info;
348444961713Sgirish 	nxge_status_t		status = NXGE_OK;
348544961713Sgirish 	int			i, j, index;
348644961713Sgirish 	uint32_t		size, bsize, nblocks, nmsgs;
348744961713Sgirish 
348844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
348944961713Sgirish 		"==> nxge_map_rxdma_channel_buf_ring: channel %d",
349044961713Sgirish 		channel));
349144961713Sgirish 
349244961713Sgirish 	dma_bufp = tmp_bufp = *dma_buf_p;
349344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
349444961713Sgirish 		" nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
349544961713Sgirish 		"chunks bufp 0x%016llx",
349644961713Sgirish 		channel, num_chunks, dma_bufp));
349744961713Sgirish 
349844961713Sgirish 	nmsgs = 0;
349944961713Sgirish 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
350044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
350144961713Sgirish 			"==> nxge_map_rxdma_channel_buf_ring: channel %d "
350244961713Sgirish 			"bufp 0x%016llx nblocks %d nmsgs %d",
350344961713Sgirish 			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
350444961713Sgirish 		nmsgs += tmp_bufp->nblocks;
350544961713Sgirish 	}
350644961713Sgirish 	if (!nmsgs) {
350756d930aeSspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
350844961713Sgirish 			"<== nxge_map_rxdma_channel_buf_ring: channel %d "
350944961713Sgirish 			"no msg blocks",
351044961713Sgirish 			channel));
351144961713Sgirish 		status = NXGE_ERROR;
351244961713Sgirish 		goto nxge_map_rxdma_channel_buf_ring_exit;
351344961713Sgirish 	}
351444961713Sgirish 
3515007969e0Stm 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);
351644961713Sgirish 
351744961713Sgirish 	size = nmsgs * sizeof (p_rx_msg_t);
351844961713Sgirish 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
351944961713Sgirish 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
352044961713Sgirish 		KM_SLEEP);
352144961713Sgirish 
352244961713Sgirish 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
352344961713Sgirish 				(void *)nxgep->interrupt_cookie);
352444961713Sgirish 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
352544961713Sgirish 				(void *)nxgep->interrupt_cookie);
352644961713Sgirish 	rbrp->rdc = channel;
352744961713Sgirish 	rbrp->num_blocks = num_chunks;
352844961713Sgirish 	rbrp->tnblocks = nmsgs;
352944961713Sgirish 	rbrp->rbb_max = nmsgs;
353044961713Sgirish 	rbrp->rbr_max_size = nmsgs;
353144961713Sgirish 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
353244961713Sgirish 
353344961713Sgirish 	/*
353444961713Sgirish 	 * Buffer sizes suggested by the NIU architecture:
353544961713Sgirish 	 * 256B, 1KB and 2KB (larger when jumbo frames are enabled).
353644961713Sgirish 	 */
353744961713Sgirish 
353844961713Sgirish 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
353944961713Sgirish 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
354044961713Sgirish 	rbrp->npi_pkt_buf_size0 = SIZE_256B;
354144961713Sgirish 
354244961713Sgirish 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
354344961713Sgirish 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
354444961713Sgirish 	rbrp->npi_pkt_buf_size1 = SIZE_1KB;
354544961713Sgirish 
354644961713Sgirish 	rbrp->block_size = nxgep->rx_default_block_size;
354744961713Sgirish 
354814ea4bb7Ssd 	if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) {
354944961713Sgirish 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
355044961713Sgirish 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
355144961713Sgirish 		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
355244961713Sgirish 	} else {
355344961713Sgirish 		if (rbrp->block_size >= 0x2000) {
355444961713Sgirish 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
355544961713Sgirish 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
355644961713Sgirish 			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
355744961713Sgirish 		} else {
355844961713Sgirish 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
355944961713Sgirish 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
356044961713Sgirish 			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
356144961713Sgirish 		}
356244961713Sgirish 	}
356344961713Sgirish 
356444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
356544961713Sgirish 		"==> nxge_map_rxdma_channel_buf_ring: channel %d "
356644961713Sgirish 		"actual rbr max %d rbb_max %d nmsgs %d "
356744961713Sgirish 		"rbrp->block_size %d default_block_size %d "
356844961713Sgirish 		"(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
356944961713Sgirish 		channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
357044961713Sgirish 		rbrp->block_size, nxgep->rx_default_block_size,
357144961713Sgirish 		nxge_rbr_size, nxge_rbr_spare_size));
357244961713Sgirish 
357344961713Sgirish 	/* Map in buffers from the buffer pool.  */
357444961713Sgirish 	index = 0;
357544961713Sgirish 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
357644961713Sgirish 		bsize = dma_bufp->block_size;
357744961713Sgirish 		nblocks = dma_bufp->nblocks;
3578adfcba55Sjoycey #if defined(__i386)
3579adfcba55Sjoycey 		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
3580adfcba55Sjoycey #else
358144961713Sgirish 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
3582adfcba55Sjoycey #endif
358344961713Sgirish 		ring_info->buffer[i].buf_index = i;
358444961713Sgirish 		ring_info->buffer[i].buf_size = dma_bufp->alength;
358544961713Sgirish 		ring_info->buffer[i].start_index = index;
3586adfcba55Sjoycey #if defined(__i386)
3587adfcba55Sjoycey 		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
3588adfcba55Sjoycey #else
358944961713Sgirish 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
3590adfcba55Sjoycey #endif
359144961713Sgirish 
359244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
359344961713Sgirish 			" nxge_map_rxdma_channel_buf_ring: map channel %d "
359444961713Sgirish 			"chunk %d"
359544961713Sgirish 			" nblocks %d chunk_size %x block_size 0x%x "
359644961713Sgirish 			"dma_bufp $%p", channel, i,
359744961713Sgirish 			dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
359844961713Sgirish 			dma_bufp));
359944961713Sgirish 
360044961713Sgirish 		for (j = 0; j < nblocks; j++) {
360144961713Sgirish 			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
360244961713Sgirish 					dma_bufp)) == NULL) {
360356d930aeSspeer 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
360456d930aeSspeer 					"allocb failed (index %d i %d j %d)",
360556d930aeSspeer 					index, i, j));
360656d930aeSspeer 				goto nxge_map_rxdma_channel_buf_ring_fail1;
360744961713Sgirish 			}
360844961713Sgirish 			rx_msg_ring[index] = rx_msg_p;
360944961713Sgirish 			rx_msg_p->block_index = index;
361044961713Sgirish 			rx_msg_p->shifted_addr = (uint32_t)
361144961713Sgirish 				((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
361244961713Sgirish 					    RBR_BKADDR_SHIFT));
361344961713Sgirish 
361444961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
361556d930aeSspeer 				"index %d j %d rx_msg_p $%p mblk %p",
361656d930aeSspeer 				index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
361744961713Sgirish 
361844961713Sgirish 			mblk_p = rx_msg_p->rx_mblk_p;
361944961713Sgirish 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3620007969e0Stm 
3621007969e0Stm 			rbrp->rbr_ref_cnt++;
362244961713Sgirish 			index++;
362344961713Sgirish 			rx_msg_p->buf_dma.dma_channel = channel;
362444961713Sgirish 		}
362544961713Sgirish 	}
362644961713Sgirish 	if (i < rbrp->num_blocks) {
362744961713Sgirish 		goto nxge_map_rxdma_channel_buf_ring_fail1;
362844961713Sgirish 	}
362944961713Sgirish 
363044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
363144961713Sgirish 		"nxge_map_rxdma_channel_buf_ring: done buf init "
363244961713Sgirish 			"channel %d msg block entries %d",
363344961713Sgirish 			channel, index));
363444961713Sgirish 	ring_info->block_size_mask = bsize - 1;
363544961713Sgirish 	rbrp->rx_msg_ring = rx_msg_ring;
363644961713Sgirish 	rbrp->dma_bufp = dma_buf_p;
363744961713Sgirish 	rbrp->ring_info = ring_info;
363844961713Sgirish 
363944961713Sgirish 	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
364044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
364144961713Sgirish 		" nxge_map_rxdma_channel_buf_ring: "
364244961713Sgirish 		"channel %d done buf info init", channel));
364344961713Sgirish 
3644007969e0Stm 	/*
3645007969e0Stm 	 * Finally, permit nxge_freeb() to call nxge_post_page().
3646007969e0Stm 	 */
3647007969e0Stm 	rbrp->rbr_state = RBR_POSTING;
3648007969e0Stm 
364944961713Sgirish 	*rbr_p = rbrp;
365044961713Sgirish 	goto nxge_map_rxdma_channel_buf_ring_exit;
365144961713Sgirish 
365244961713Sgirish nxge_map_rxdma_channel_buf_ring_fail1:
365344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
365444961713Sgirish 		" nxge_map_rxdma_channel_buf_ring: failed channel %d (status 0x%x)",
365544961713Sgirish 		channel, status));
365644961713Sgirish 
365744961713Sgirish 	index--;
365844961713Sgirish 	for (; index >= 0; index--) {
365944961713Sgirish 		rx_msg_p = rx_msg_ring[index];
366044961713Sgirish 		if (rx_msg_p != NULL) {
366114ea4bb7Ssd 			freeb(rx_msg_p->rx_mblk_p);
366244961713Sgirish 			rx_msg_ring[index] = NULL;
366344961713Sgirish 		}
366444961713Sgirish 	}
366544961713Sgirish nxge_map_rxdma_channel_buf_ring_fail:
366644961713Sgirish 	MUTEX_DESTROY(&rbrp->post_lock);
366744961713Sgirish 	MUTEX_DESTROY(&rbrp->lock);
366844961713Sgirish 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
366944961713Sgirish 	KMEM_FREE(rx_msg_ring, size);
367044961713Sgirish 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
367144961713Sgirish 
367256d930aeSspeer 	status = NXGE_ERROR;
367356d930aeSspeer 
367444961713Sgirish nxge_map_rxdma_channel_buf_ring_exit:
367544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
367644961713Sgirish 		"<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
367744961713Sgirish 
367844961713Sgirish 	return (status);
367944961713Sgirish }
368044961713Sgirish 
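/*
 * nxge_unmap_rxdma_channel_buf_ring
 *
 * Release every per-block rx_msg_t (by freeing its attached mblk) and
 * free the ring bookkeeping for one channel.  If some buffers are still
 * loaned out (rbr_ref_cnt != 0), the ring structure itself is left
 * allocated in the RBR_UNMAPPED state so that nxge_freeb() can free it
 * when the last buffer is returned.
 */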
368144961713Sgirish /*ARGSUSED*/
368244961713Sgirish static void
368344961713Sgirish nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
368444961713Sgirish     p_rx_rbr_ring_t rbr_p)
368544961713Sgirish {
368644961713Sgirish 	p_rx_msg_t 		*rx_msg_ring;
368744961713Sgirish 	p_rx_msg_t 		rx_msg_p;
368844961713Sgirish 	rxring_info_t 		*ring_info;
368944961713Sgirish 	int			i;
369044961713Sgirish 	uint32_t		size;
369144961713Sgirish #ifdef	NXGE_DEBUG
369244961713Sgirish 	int			num_chunks;
369344961713Sgirish #endif
369444961713Sgirish 
369544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
369644961713Sgirish 		"==> nxge_unmap_rxdma_channel_buf_ring"));
369744961713Sgirish 	if (rbr_p == NULL) {
369844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
369944961713Sgirish 			"<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
370044961713Sgirish 		return;
370144961713Sgirish 	}
370244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
370344961713Sgirish 		"==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
370444961713Sgirish 		rbr_p->rdc));
370544961713Sgirish 
370644961713Sgirish 	rx_msg_ring = rbr_p->rx_msg_ring;
370744961713Sgirish 	ring_info = rbr_p->ring_info;
370844961713Sgirish 
370944961713Sgirish 	if (rx_msg_ring == NULL || ring_info == NULL) {
371044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
371144961713Sgirish 			"<== nxge_unmap_rxdma_channel_buf_ring: "
371244961713Sgirish 			"rx_msg_ring $%p ring_info $%p",
371344961713Sgirish 			rx_msg_ring, ring_info));
371444961713Sgirish 		return;
371544961713Sgirish 	}
371644961713Sgirish 
371744961713Sgirish #ifdef	NXGE_DEBUG
371844961713Sgirish 	num_chunks = rbr_p->num_blocks;
371944961713Sgirish #endif
372044961713Sgirish 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
372144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
372244961713Sgirish 		" nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
372344961713Sgirish 		"tnblocks %d (max %d) size ptrs %d ",
372444961713Sgirish 		rbr_p->rdc, num_chunks,
372544961713Sgirish 		rbr_p->tnblocks, rbr_p->rbr_max_size, size));
372644961713Sgirish 
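	/* Release the driver's reference on every message block in the ring. */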
372744961713Sgirish 	for (i = 0; i < rbr_p->tnblocks; i++) {
372844961713Sgirish 		rx_msg_p = rx_msg_ring[i];
372944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
373044961713Sgirish 			" nxge_unmap_rxdma_channel_buf_ring: "
373144961713Sgirish 			"rx_msg_p $%p",
373244961713Sgirish 			rx_msg_p));
373344961713Sgirish 		if (rx_msg_p != NULL) {
373414ea4bb7Ssd 			freeb(rx_msg_p->rx_mblk_p);
373544961713Sgirish 			rx_msg_ring[i] = NULL;
373644961713Sgirish 		}
373744961713Sgirish 	}
373844961713Sgirish 
3739007969e0Stm 	/*
3740007969e0Stm 	 * We may no longer use the mutex <post_lock>. By setting
3741007969e0Stm 	 * <rbr_state> to anything but POSTING, we prevent
3742007969e0Stm 	 * nxge_post_page() from accessing a dead mutex.
3743007969e0Stm 	 */
3744007969e0Stm 	rbr_p->rbr_state = RBR_UNMAPPING;
374544961713Sgirish 	MUTEX_DESTROY(&rbr_p->post_lock);
3746007969e0Stm 
374744961713Sgirish 	MUTEX_DESTROY(&rbr_p->lock);
374844961713Sgirish 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
374944961713Sgirish 	KMEM_FREE(rx_msg_ring, size);
3750007969e0Stm 
3751007969e0Stm 	if (rbr_p->rbr_ref_cnt == 0) {
3752007969e0Stm 		/* This is the normal state of affairs. */
3753007969e0Stm 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
3754007969e0Stm 	} else {
3755007969e0Stm 		/*
3756007969e0Stm 		 * Some of our buffers are still being used.
3757007969e0Stm 		 * Therefore, tell nxge_freeb() this ring is
3758007969e0Stm 		 * unmapped, so it may free <rbr_p> for us.
3759007969e0Stm 		 */
3760007969e0Stm 		rbr_p->rbr_state = RBR_UNMAPPED;
3761007969e0Stm 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3762007969e0Stm 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
3763007969e0Stm 		    rbr_p->rbr_ref_cnt,
3764007969e0Stm 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
3765007969e0Stm 	}
376644961713Sgirish 
376744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
376844961713Sgirish 		"<== nxge_unmap_rxdma_channel_buf_ring"));
376944961713Sgirish }
377044961713Sgirish 
377144961713Sgirish static nxge_status_t
377244961713Sgirish nxge_rxdma_hw_start_common(p_nxge_t nxgep)
377344961713Sgirish {
377444961713Sgirish 	nxge_status_t		status = NXGE_OK;
377544961713Sgirish 
377644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
377744961713Sgirish 
377844961713Sgirish 	/*
377944961713Sgirish 	 * Load the sharable parameters by writing to the
378044961713Sgirish 	 * function zero control registers. These FZC registers
378144961713Sgirish 	 * should be initialized only once for the entire chip.
378244961713Sgirish 	 */
378344961713Sgirish 	(void) nxge_init_fzc_rx_common(nxgep);
378444961713Sgirish 
378544961713Sgirish 	/*
378644961713Sgirish 	 * Initialize the RXDMA port specific FZC control configurations.
378744961713Sgirish 	 * These FZC registers pertain to each port.
378844961713Sgirish 	 */
378944961713Sgirish 	(void) nxge_init_fzc_rxdma_port(nxgep);
379044961713Sgirish 
379144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
379244961713Sgirish 
379344961713Sgirish 	return (status);
379444961713Sgirish }
379544961713Sgirish 
379644961713Sgirish /*ARGSUSED*/
379744961713Sgirish static void
379844961713Sgirish nxge_rxdma_hw_stop_common(p_nxge_t nxgep)
379944961713Sgirish {
380044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common"));
380144961713Sgirish 
380244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop_common"));
380344961713Sgirish }
380444961713Sgirish 
380544961713Sgirish static nxge_status_t
380644961713Sgirish nxge_rxdma_hw_start(p_nxge_t nxgep)
380744961713Sgirish {
380844961713Sgirish 	int			i, ndmas;
380944961713Sgirish 	uint16_t		channel;
381044961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
381144961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
381244961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
381344961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
381444961713Sgirish 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
381544961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
381644961713Sgirish 	nxge_status_t		status = NXGE_OK;
381744961713Sgirish 
381844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
381944961713Sgirish 
382044961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
382144961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
382244961713Sgirish 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
382344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
382444961713Sgirish 			"<== nxge_rxdma_hw_start: NULL ring pointers"));
382544961713Sgirish 		return (NXGE_ERROR);
382644961713Sgirish 	}
382744961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
382844961713Sgirish 	if (ndmas == 0) {
382944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
383044961713Sgirish 			"<== nxge_rxdma_hw_start: no dma channel allocated"));
383144961713Sgirish 		return (NXGE_ERROR);
383244961713Sgirish 	}
383344961713Sgirish 
383444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
383544961713Sgirish 		"==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
383644961713Sgirish 
383744961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
383844961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
383944961713Sgirish 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
384044961713Sgirish 	if (rx_mbox_areas_p) {
384144961713Sgirish 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
384244961713Sgirish 	}
384344961713Sgirish 
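	/*
	 * Start each RXDMA channel.  If any channel fails to start,
	 * fall through to the error path and stop the channels that
	 * were already started.
	 */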
384444961713Sgirish 	for (i = 0; i < ndmas; i++) {
384544961713Sgirish 		channel = rbr_rings[i]->rdc;
384644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
384744961713Sgirish 			"==> nxge_rxdma_hw_start (ndmas %d) channel %d",
384844961713Sgirish 				ndmas, channel));
384944961713Sgirish 		status = nxge_rxdma_start_channel(nxgep, channel,
385044961713Sgirish 				(p_rx_rbr_ring_t)rbr_rings[i],
385144961713Sgirish 				(p_rx_rcr_ring_t)rcr_rings[i],
385244961713Sgirish 				(p_rx_mbox_t)rx_mbox_p[i]);
385344961713Sgirish 		if (status != NXGE_OK) {
385444961713Sgirish 			goto nxge_rxdma_hw_start_fail1;
385544961713Sgirish 		}
385644961713Sgirish 	}
385744961713Sgirish 
385844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
385944961713Sgirish 		"rx_rbr_rings 0x%016llx rings 0x%016llx",
386044961713Sgirish 		rx_rbr_rings, rx_rcr_rings));
386144961713Sgirish 
386244961713Sgirish 	goto nxge_rxdma_hw_start_exit;
386344961713Sgirish 
386444961713Sgirish nxge_rxdma_hw_start_fail1:
386544961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
386644961713Sgirish 		"==> nxge_rxdma_hw_start: disable "
386744961713Sgirish 		"(status 0x%x channel %d i %d)", status, channel, i));
386844961713Sgirish 	for (; i >= 0; i--) {
386944961713Sgirish 		channel = rbr_rings[i]->rdc;
387044961713Sgirish 		(void) nxge_rxdma_stop_channel(nxgep, channel);
387144961713Sgirish 	}
387244961713Sgirish 
387344961713Sgirish nxge_rxdma_hw_start_exit:
387444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
387544961713Sgirish 		"<== nxge_rxdma_hw_start: (status 0x%x)", status));
387644961713Sgirish 
387744961713Sgirish 	return (status);
387844961713Sgirish }
387944961713Sgirish 
388044961713Sgirish static void
388144961713Sgirish nxge_rxdma_hw_stop(p_nxge_t nxgep)
388244961713Sgirish {
388344961713Sgirish 	int			i, ndmas;
388444961713Sgirish 	uint16_t		channel;
388544961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
388644961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
388744961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
388844961713Sgirish 
388944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
389044961713Sgirish 
389144961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
389244961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
389344961713Sgirish 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
389444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
389544961713Sgirish 			"<== nxge_rxdma_hw_stop: NULL ring pointers"));
389644961713Sgirish 		return;
389744961713Sgirish 	}
389844961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
389944961713Sgirish 	if (!ndmas) {
390044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
390144961713Sgirish 			"<== nxge_rxdma_hw_stop: no dma channel allocated"));
390244961713Sgirish 		return;
390344961713Sgirish 	}
390444961713Sgirish 
390544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
390644961713Sgirish 		"==> nxge_rxdma_hw_stop (ndmas %d)", ndmas));
390744961713Sgirish 
390844961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
390944961713Sgirish 
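	/* Stop every RXDMA channel that was started for this port. */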
391044961713Sgirish 	for (i = 0; i < ndmas; i++) {
391144961713Sgirish 		channel = rbr_rings[i]->rdc;
391244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
391344961713Sgirish 			"==> nxge_rxdma_hw_stop (ndmas %d) channel %d",
391444961713Sgirish 				ndmas, channel));
391544961713Sgirish 		(void) nxge_rxdma_stop_channel(nxgep, channel);
391644961713Sgirish 	}
391744961713Sgirish 
391844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
391944961713Sgirish 		"rx_rbr_rings 0x%016llx rings 0x%016llx",
392044961713Sgirish 		rx_rbr_rings, rx_rcr_rings));
392144961713Sgirish 
392244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
392344961713Sgirish }
392444961713Sgirish 
392544961713Sgirish 
392644961713Sgirish static nxge_status_t
392744961713Sgirish nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
392844961713Sgirish     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
392944961713Sgirish 
393044961713Sgirish {
393144961713Sgirish 	npi_handle_t		handle;
393244961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
393344961713Sgirish 	rx_dma_ctl_stat_t	cs;
393444961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
393544961713Sgirish 	nxge_status_t		status = NXGE_OK;
393644961713Sgirish 
393744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
393844961713Sgirish 
393944961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
394044961713Sgirish 
394144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
394244961713Sgirish 		"npi handle addr $%p acc $%p",
394344961713Sgirish 		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
394444961713Sgirish 
394544961713Sgirish 	/* Reset RXDMA channel */
394644961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
394744961713Sgirish 	if (rs != NPI_SUCCESS) {
394844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
394944961713Sgirish 			"==> nxge_rxdma_start_channel: "
395044961713Sgirish 			"reset rxdma failed (0x%08x channel %d)",
395144961713Sgirish 			rs, channel));
395244961713Sgirish 		return (NXGE_ERROR | rs);
395344961713Sgirish 	}
395444961713Sgirish 
395544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
395644961713Sgirish 		"==> nxge_rxdma_start_channel: reset done: channel %d",
395744961713Sgirish 		channel));
395844961713Sgirish 
395944961713Sgirish 	/*
396044961713Sgirish 	 * Initialize the RXDMA channel specific FZC control
396144961713Sgirish 	 * configurations. These FZC registers pertain to
396244961713Sgirish 	 * each RX channel (logical pages).
396344961713Sgirish 	 */
396444961713Sgirish 	status = nxge_init_fzc_rxdma_channel(nxgep,
396544961713Sgirish 			channel, rbr_p, rcr_p, mbox_p);
396644961713Sgirish 	if (status != NXGE_OK) {
396744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
396844961713Sgirish 			"==> nxge_rxdma_start_channel: "
396944961713Sgirish 			"init fzc rxdma failed (0x%08x channel %d)",
397044961713Sgirish 			status, channel));
397144961713Sgirish 		return (status);
397244961713Sgirish 	}
397344961713Sgirish 
397444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
397544961713Sgirish 		"==> nxge_rxdma_start_channel: fzc done"));
397644961713Sgirish 
397744961713Sgirish 	/*
397844961713Sgirish 	 * Zero out the shadow and prefetch ram.
397944961713Sgirish 	 */
398044961713Sgirish 
398144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
398244961713Sgirish 		"ram done"));
398344961713Sgirish 
398444961713Sgirish 	/* Set up the interrupt event masks. */
398544961713Sgirish 	ent_mask.value = 0;
398644961713Sgirish 	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
398744961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
398844961713Sgirish 			&ent_mask);
398944961713Sgirish 	if (rs != NPI_SUCCESS) {
399044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
399144961713Sgirish 			"==> nxge_rxdma_start_channel: "
399244961713Sgirish 			"init rxdma event masks failed (0x%08x channel %d)",
399344961713Sgirish 			rs, channel));
399444961713Sgirish 		return (NXGE_ERROR | rs);
399544961713Sgirish 	}
399644961713Sgirish 
399744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
399844961713Sgirish 		"event done: channel %d (mask 0x%016llx)",
399944961713Sgirish 		channel, ent_mask.value));
400044961713Sgirish 
400144961713Sgirish 	/* Initialize the receive DMA control and status register */
400244961713Sgirish 	cs.value = 0;
400344961713Sgirish 	cs.bits.hdw.mex = 1;
400444961713Sgirish 	cs.bits.hdw.rcrthres = 1;
400544961713Sgirish 	cs.bits.hdw.rcrto = 1;
400644961713Sgirish 	cs.bits.hdw.rbr_empty = 1;
400744961713Sgirish 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
400844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
400944961713Sgirish 		"channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
401044961713Sgirish 	if (status != NXGE_OK) {
401144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
401244961713Sgirish 			"==> nxge_rxdma_start_channel: "
401344961713Sgirish 			"init rxdma control register failed (0x%08x channel %d)",
401444961713Sgirish 			status, channel));
401544961713Sgirish 		return (status);
401644961713Sgirish 	}
401744961713Sgirish 
401844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
401944961713Sgirish 		"control done - channel %d cs 0x%016llx", channel, cs.value));
402044961713Sgirish 
402144961713Sgirish 	/*
402244961713Sgirish 	 * Load the RXDMA descriptors, buffers, and mailbox,
402344961713Sgirish 	 * initialize the receive DMA channel and
402444961713Sgirish 	 * enable it.
402544961713Sgirish 	 */
402644961713Sgirish 	status = nxge_enable_rxdma_channel(nxgep,
402744961713Sgirish 			channel, rbr_p, rcr_p, mbox_p);
402844961713Sgirish 
402944961713Sgirish 	if (status != NXGE_OK) {
403044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
403144961713Sgirish 			    " nxge_rxdma_start_channel: "
403244961713Sgirish 			    " init enable rxdma failed (0x%08x channel %d)",
403344961713Sgirish 			    status, channel));
403444961713Sgirish 		return (status);
403544961713Sgirish 	}
403644961713Sgirish 
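	/*
	 * Mask the WRED drop and port drop packet events now that the
	 * channel is enabled.
	 */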
403744961713Sgirish 	ent_mask.value = 0;
403844961713Sgirish 	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
403944961713Sgirish 				RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
404044961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
404144961713Sgirish 			&ent_mask);
404244961713Sgirish 	if (rs != NPI_SUCCESS) {
404344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
404444961713Sgirish 			"==> nxge_rxdma_start_channel: "
404544961713Sgirish 			"init rxdma event masks failed (0x%08x channel %d)",
404644961713Sgirish 			rs, channel));
404744961713Sgirish 		return (NXGE_ERROR | rs);
404844961713Sgirish 	}
404944961713Sgirish 
405044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
405144961713Sgirish 		"event masks done - channel %d cs 0x%016llx", channel, cs.value));
405244961713Sgirish 
405344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
405444961713Sgirish 		"==> nxge_rxdma_start_channel: enable done"));
405544961713Sgirish 
405644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
405744961713Sgirish 
405844961713Sgirish 	return (NXGE_OK);
405944961713Sgirish }
406044961713Sgirish 
406144961713Sgirish static nxge_status_t
406244961713Sgirish nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
406344961713Sgirish {
406444961713Sgirish 	npi_handle_t		handle;
406544961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
406644961713Sgirish 	rx_dma_ctl_stat_t	cs;
406744961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
406844961713Sgirish 	nxge_status_t		status = NXGE_OK;
406944961713Sgirish 
407044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
407144961713Sgirish 
407244961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
407344961713Sgirish 
407444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
407544961713Sgirish 		"npi handle addr $%p acc $%p",
407644961713Sgirish 		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
407744961713Sgirish 
407844961713Sgirish 	/* Reset RXDMA channel */
407944961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
408044961713Sgirish 	if (rs != NPI_SUCCESS) {
408144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
408244961713Sgirish 			    " nxge_rxdma_stop_channel: "
408344961713Sgirish 			    " reset rxdma failed (0x%08x channel %d)",
408444961713Sgirish 			    rs, channel));
408544961713Sgirish 		return (NXGE_ERROR | rs);
408644961713Sgirish 	}
408744961713Sgirish 
408844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
408944961713Sgirish 		"==> nxge_rxdma_stop_channel: reset done"));
409044961713Sgirish 
409144961713Sgirish 	/* Set up the interrupt event masks. */
409244961713Sgirish 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
409344961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
409444961713Sgirish 			&ent_mask);
409544961713Sgirish 	if (rs != NPI_SUCCESS) {
409644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
409744961713Sgirish 			    "==> nxge_rxdma_stop_channel: "
409844961713Sgirish 			    "set rxdma event masks failed (0x%08x channel %d)",
409944961713Sgirish 			    rs, channel));
410044961713Sgirish 		return (NXGE_ERROR | rs);
410144961713Sgirish 	}
410244961713Sgirish 
410344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
410444961713Sgirish 		"==> nxge_rxdma_stop_channel: event done"));
410544961713Sgirish 
410644961713Sgirish 	/* Initialize the receive DMA control and status register */
410744961713Sgirish 	cs.value = 0;
410844961713Sgirish 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel,
410944961713Sgirish 			&cs);
411044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
411144961713Sgirish 		" to default (all 0s) 0x%08x", cs.value));
411244961713Sgirish 	if (status != NXGE_OK) {
411344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
411444961713Sgirish 			    " nxge_rxdma_stop_channel: init rxdma"
411544961713Sgirish 			    " control register failed (0x%08x channel %d)",
411644961713Sgirish 			status, channel));
411744961713Sgirish 		return (status);
411844961713Sgirish 	}
411944961713Sgirish 
412044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
412144961713Sgirish 		"==> nxge_rxdma_stop_channel: control done"));
412244961713Sgirish 
412344961713Sgirish 	/* disable dma channel */
412444961713Sgirish 	status = nxge_disable_rxdma_channel(nxgep, channel);
412544961713Sgirish 
412644961713Sgirish 	if (status != NXGE_OK) {
412744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
412844961713Sgirish 			    " nxge_rxdma_stop_channel: "
412944961713Sgirish 			    " disable rxdma failed (0x%08x channel %d)",
413044961713Sgirish 			    status, channel));
413144961713Sgirish 		return (status);
413244961713Sgirish 	}
413344961713Sgirish 
413444961713Sgirish 	NXGE_DEBUG_MSG((nxgep,
413544961713Sgirish 		RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
413644961713Sgirish 
413744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
413844961713Sgirish 
413944961713Sgirish 	return (NXGE_OK);
414044961713Sgirish }
414144961713Sgirish 
414244961713Sgirish nxge_status_t
414344961713Sgirish nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
414444961713Sgirish {
414544961713Sgirish 	npi_handle_t		handle;
414644961713Sgirish 	p_nxge_rdc_sys_stats_t	statsp;
414744961713Sgirish 	rx_ctl_dat_fifo_stat_t	stat;
414844961713Sgirish 	uint32_t		zcp_err_status;
414944961713Sgirish 	uint32_t		ipp_err_status;
415044961713Sgirish 	nxge_status_t		status = NXGE_OK;
415144961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
415244961713Sgirish 	boolean_t		my_err = B_FALSE;
415344961713Sgirish 
415444961713Sgirish 	handle = nxgep->npi_handle;
415544961713Sgirish 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
415644961713Sgirish 
415744961713Sgirish 	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
415844961713Sgirish 
415944961713Sgirish 	if (rs != NPI_SUCCESS)
416044961713Sgirish 		return (NXGE_ERROR | rs);
416144961713Sgirish 
416244961713Sgirish 	if (stat.bits.ldw.id_mismatch) {
416344961713Sgirish 		statsp->id_mismatch++;
416444961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
416544961713Sgirish 					NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
416644961713Sgirish 		/* Global fatal error encountered */
416744961713Sgirish 	}
416844961713Sgirish 
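	/*
	 * ZCP/IPP EOP errors are reported per port; act only on the
	 * bits that belong to this port.
	 */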
416944961713Sgirish 	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
417044961713Sgirish 		switch (nxgep->mac.portnum) {
417144961713Sgirish 		case 0:
417244961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
417344961713Sgirish 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
417444961713Sgirish 				my_err = B_TRUE;
417544961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
417644961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
417744961713Sgirish 			}
417844961713Sgirish 			break;
417944961713Sgirish 		case 1:
418044961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
418144961713Sgirish 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
418244961713Sgirish 				my_err = B_TRUE;
418344961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
418444961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
418544961713Sgirish 			}
418644961713Sgirish 			break;
418744961713Sgirish 		case 2:
418844961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
418944961713Sgirish 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
419044961713Sgirish 				my_err = B_TRUE;
419144961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
419244961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
419344961713Sgirish 			}
419444961713Sgirish 			break;
419544961713Sgirish 		case 3:
419644961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
419744961713Sgirish 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
419844961713Sgirish 				my_err = B_TRUE;
419944961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
420044961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
420144961713Sgirish 			}
420244961713Sgirish 			break;
420344961713Sgirish 		default:
420444961713Sgirish 			return (NXGE_ERROR);
420544961713Sgirish 		}
420644961713Sgirish 	}
420744961713Sgirish 
420844961713Sgirish 	if (my_err) {
420944961713Sgirish 		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
421044961713Sgirish 							zcp_err_status);
421144961713Sgirish 		if (status != NXGE_OK)
421244961713Sgirish 			return (status);
421344961713Sgirish 	}
421444961713Sgirish 
421544961713Sgirish 	return (NXGE_OK);
421644961713Sgirish }
421744961713Sgirish 
421844961713Sgirish static nxge_status_t
421944961713Sgirish nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
422044961713Sgirish 							uint32_t zcp_status)
422144961713Sgirish {
422244961713Sgirish 	boolean_t		rxport_fatal = B_FALSE;
422344961713Sgirish 	p_nxge_rdc_sys_stats_t	statsp;
422444961713Sgirish 	nxge_status_t		status = NXGE_OK;
422544961713Sgirish 	uint8_t			portn;
422644961713Sgirish 
422744961713Sgirish 	portn = nxgep->mac.portnum;
422844961713Sgirish 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
422944961713Sgirish 
423044961713Sgirish 	if (ipp_status & (0x1 << portn)) {
423144961713Sgirish 		statsp->ipp_eop_err++;
423244961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
423344961713Sgirish 					NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
423444961713Sgirish 		rxport_fatal = B_TRUE;
423544961713Sgirish 	}
423644961713Sgirish 
423744961713Sgirish 	if (zcp_status & (0x1 << portn)) {
423844961713Sgirish 		statsp->zcp_eop_err++;
423944961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
424044961713Sgirish 					NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
424144961713Sgirish 		rxport_fatal = B_TRUE;
424244961713Sgirish 	}
424344961713Sgirish 
424444961713Sgirish 	if (rxport_fatal) {
424544961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
424644961713Sgirish 			    " nxge_rxdma_handle_port_errors: "
424744961713Sgirish 			    " fatal error on Port #%d\n",
424844961713Sgirish 				portn));
424944961713Sgirish 		status = nxge_rx_port_fatal_err_recover(nxgep);
425044961713Sgirish 		if (status == NXGE_OK) {
425144961713Sgirish 			FM_SERVICE_RESTORED(nxgep);
425244961713Sgirish 		}
425344961713Sgirish 	}
425444961713Sgirish 
425544961713Sgirish 	return (status);
425644961713Sgirish }
425744961713Sgirish 
425844961713Sgirish static nxge_status_t
425944961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
426044961713Sgirish {
426144961713Sgirish 	npi_handle_t		handle;
426244961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
426344961713Sgirish 	nxge_status_t		status = NXGE_OK;
426444961713Sgirish 	p_rx_rbr_ring_t		rbrp;
426544961713Sgirish 	p_rx_rcr_ring_t		rcrp;
426644961713Sgirish 	p_rx_mbox_t		mboxp;
426744961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
426844961713Sgirish 	p_nxge_dma_common_t	dmap;
426944961713Sgirish 	int			ring_idx;
427044961713Sgirish 	uint32_t		ref_cnt;
427144961713Sgirish 	p_rx_msg_t		rx_msg_p;
427244961713Sgirish 	int			i;
427344961713Sgirish 	uint32_t		nxge_port_rcr_size;
427444961713Sgirish 
427544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
427644961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
427744961713Sgirish 			"Recovering from RxDMAChannel#%d error...", channel));
427844961713Sgirish 
427944961713Sgirish 	/*
428044961713Sgirish 	 * Stop the dma channel and wait for the stop-done bit.
428144961713Sgirish 	 * If the stop-done bit is not set, then flag
428244961713Sgirish 	 * an error.
428344961713Sgirish 	 */
428444961713Sgirish 
428544961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
428644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
428744961713Sgirish 
428844961713Sgirish 	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
428944961713Sgirish 	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
429044961713Sgirish 	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];
429144961713Sgirish 
429244961713Sgirish 	MUTEX_ENTER(&rcrp->lock);
429344961713Sgirish 	MUTEX_ENTER(&rbrp->lock);
429444961713Sgirish 	MUTEX_ENTER(&rbrp->post_lock);
429544961713Sgirish 
429644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
429744961713Sgirish 
429844961713Sgirish 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
429944961713Sgirish 	if (rs != NPI_SUCCESS) {
430044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
430144961713Sgirish 			"nxge_disable_rxdma_channel:failed"));
430244961713Sgirish 		goto fail;
430344961713Sgirish 	}
430444961713Sgirish 
430544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
430644961713Sgirish 
430744961713Sgirish 	/* Disable interrupt */
430844961713Sgirish 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
430944961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
431044961713Sgirish 	if (rs != NPI_SUCCESS) {
431144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
431244961713Sgirish 				"nxge_rxdma_stop_channel: "
431344961713Sgirish 				"set rxdma event masks failed (channel %d)",
431444961713Sgirish 				channel));
431544961713Sgirish 	}
431644961713Sgirish 
431744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
431844961713Sgirish 
431944961713Sgirish 	/* Reset RXDMA channel */
432044961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
432144961713Sgirish 	if (rs != NPI_SUCCESS) {
432244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
432344961713Sgirish 			"nxge_rxdma_fatal_err_recover: "
432444961713Sgirish 				" reset rxdma failed (channel %d)", channel));
432544961713Sgirish 		goto fail;
432644961713Sgirish 	}
432744961713Sgirish 
432844961713Sgirish 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
432944961713Sgirish 
433044961713Sgirish 	mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
433244961713Sgirish 
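	/*
	 * Reset the software view of the RBR and RCR rings to match
	 * the freshly reset hardware.
	 */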
433344961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
433444961713Sgirish 	rbrp->rbr_rd_index = 0;
433544961713Sgirish 
433644961713Sgirish 	rcrp->comp_rd_index = 0;
433744961713Sgirish 	rcrp->comp_wt_index = 0;
433844961713Sgirish 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
433944961713Sgirish 		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
434044961713Sgirish 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4341adfcba55Sjoycey #if defined(__i386)
4342adfcba55Sjoycey 		(p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4343adfcba55Sjoycey #else
434444961713Sgirish 		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4345adfcba55Sjoycey #endif
434644961713Sgirish 
434744961713Sgirish 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
434844961713Sgirish 		(nxge_port_rcr_size - 1);
434944961713Sgirish 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
435044961713Sgirish 		(nxge_port_rcr_size - 1);
435144961713Sgirish 
435244961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
435344961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
435444961713Sgirish 
435544961713Sgirish 	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
435644961713Sgirish 
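	/*
	 * Walk the buffer ring.  For buffers still referenced elsewhere
	 * (ref_cnt != 1): a block that has reached its maximum usage
	 * count is marked free for re-posting; otherwise its outstanding
	 * usage is reported.
	 */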
435744961713Sgirish 	for (i = 0; i < rbrp->rbr_max_size; i++) {
435844961713Sgirish 		rx_msg_p = rbrp->rx_msg_ring[i];
435944961713Sgirish 		ref_cnt = rx_msg_p->ref_cnt;
436044961713Sgirish 		if (ref_cnt != 1) {
4361a3c5bd6dSspeer 			if (rx_msg_p->cur_usage_cnt !=
4362a3c5bd6dSspeer 					rx_msg_p->max_usage_cnt) {
436344961713Sgirish 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
436444961713Sgirish 						"buf[%d]: cur_usage_cnt = %d "
436544961713Sgirish 						"max_usage_cnt = %d\n", i,
436644961713Sgirish 						rx_msg_p->cur_usage_cnt,
436744961713Sgirish 						rx_msg_p->max_usage_cnt));
4368a3c5bd6dSspeer 			} else {
4369a3c5bd6dSspeer 				/* Buffer can be re-posted */
4370a3c5bd6dSspeer 				rx_msg_p->free = B_TRUE;
4371a3c5bd6dSspeer 				rx_msg_p->cur_usage_cnt = 0;
4372a3c5bd6dSspeer 				rx_msg_p->max_usage_cnt = 0xbaddcafe;
4373a3c5bd6dSspeer 				rx_msg_p->pkt_buf_size = 0;
4374a3c5bd6dSspeer 			}
437544961713Sgirish 		}
437644961713Sgirish 	}
437744961713Sgirish 
437844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
437944961713Sgirish 
438044961713Sgirish 	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
438144961713Sgirish 	if (status != NXGE_OK) {
438244961713Sgirish 		goto fail;
438344961713Sgirish 	}
438444961713Sgirish 
438544961713Sgirish 	MUTEX_EXIT(&rbrp->post_lock);
438644961713Sgirish 	MUTEX_EXIT(&rbrp->lock);
438744961713Sgirish 	MUTEX_EXIT(&rcrp->lock);
438844961713Sgirish 
438944961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
439044961713Sgirish 			"Recovery Successful, RxDMAChannel#%d Restored",
439144961713Sgirish 			channel));
439244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
439344961713Sgirish 
439444961713Sgirish 	return (NXGE_OK);
439544961713Sgirish fail:
439644961713Sgirish 	MUTEX_EXIT(&rbrp->post_lock);
439744961713Sgirish 	MUTEX_EXIT(&rbrp->lock);
439844961713Sgirish 	MUTEX_EXIT(&rcrp->lock);
439944961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
440044961713Sgirish 
440144961713Sgirish 	return (NXGE_ERROR | rs);
440244961713Sgirish }
440344961713Sgirish 
440444961713Sgirish nxge_status_t
440544961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
440644961713Sgirish {
440744961713Sgirish 	nxge_status_t		status = NXGE_OK;
440844961713Sgirish 	p_nxge_dma_common_t	*dma_buf_p;
440944961713Sgirish 	uint16_t		channel;
441044961713Sgirish 	int			ndmas;
441144961713Sgirish 	int			i;
441244961713Sgirish 
441344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
441444961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
441544961713Sgirish 				"Recovering from RxPort error..."));
441644961713Sgirish 	/* Disable RxMAC */
441744961713Sgirish 
441844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n"));
441944961713Sgirish 	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
442044961713Sgirish 		goto fail;
442144961713Sgirish 
442244961713Sgirish 	NXGE_DELAY(1000);
442344961713Sgirish 
442444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels..."));
442544961713Sgirish 
442644961713Sgirish 	ndmas = nxgep->rx_buf_pool_p->ndmas;
442744961713Sgirish 	dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p;
442844961713Sgirish 
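	/* Run the per-channel recovery on every RXDMA channel of this port. */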
442944961713Sgirish 	for (i = 0; i < ndmas; i++) {
443044961713Sgirish 		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
443144961713Sgirish 		if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) {
443244961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
443344961713Sgirish 					"Could not recover channel %d",
443444961713Sgirish 					channel));
443544961713Sgirish 		}
443644961713Sgirish 	}
443744961713Sgirish 
443844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP..."));
443944961713Sgirish 
444044961713Sgirish 	/* Reset IPP */
444144961713Sgirish 	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
444244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
444344961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
444444961713Sgirish 			"Failed to reset IPP"));
444544961713Sgirish 		goto fail;
444644961713Sgirish 	}
444744961713Sgirish 
444844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
444944961713Sgirish 
445044961713Sgirish 	/* Reset RxMAC */
445144961713Sgirish 	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
445244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
445344961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
445444961713Sgirish 			"Failed to reset RxMAC"));
445544961713Sgirish 		goto fail;
445644961713Sgirish 	}
445744961713Sgirish 
445844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
445944961713Sgirish 
446044961713Sgirish 	/* Re-Initialize IPP */
446144961713Sgirish 	if (nxge_ipp_init(nxgep) != NXGE_OK) {
446244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
446344961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
446444961713Sgirish 			"Failed to init IPP"));
446544961713Sgirish 		goto fail;
446644961713Sgirish 	}
446744961713Sgirish 
446844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
446944961713Sgirish 
447044961713Sgirish 	/* Re-Initialize RxMAC */
447144961713Sgirish 	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
447244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
447344961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
447444961713Sgirish 			"Failed to init RxMAC"));
447544961713Sgirish 		goto fail;
447644961713Sgirish 	}
447744961713Sgirish 
447844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
447944961713Sgirish 
448044961713Sgirish 	/* Re-enable RxMAC */
448144961713Sgirish 	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
448244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
448344961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
448444961713Sgirish 			"Failed to enable RxMAC"));
448544961713Sgirish 		goto fail;
448644961713Sgirish 	}
448744961713Sgirish 
448844961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
448944961713Sgirish 			"Recovery Successful, RxPort Restored"));
449044961713Sgirish 
449144961713Sgirish 	return (NXGE_OK);
449244961713Sgirish fail:
449344961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
449444961713Sgirish 	return (status);
449544961713Sgirish }
449644961713Sgirish 
449744961713Sgirish void
449844961713Sgirish nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
449944961713Sgirish {
450044961713Sgirish 	rx_dma_ctl_stat_t	cs;
450144961713Sgirish 	rx_ctl_dat_fifo_stat_t	cdfs;
450244961713Sgirish 
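	/*
	 * Fault injection: set the requested error bit in the matching
	 * debug register so the normal error handling path is exercised.
	 */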
450344961713Sgirish 	switch (err_id) {
450444961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
450544961713Sgirish 	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
450644961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
450744961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
450844961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
450944961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
451044961713Sgirish 	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
451144961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
451244961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCRINCON:
451344961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCRFULL:
451444961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBRFULL:
451544961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
451644961713Sgirish 	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
451744961713Sgirish 	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
451844961713Sgirish 		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
451944961713Sgirish 			chan, &cs.value);
452044961713Sgirish 		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
452144961713Sgirish 			cs.bits.hdw.rcr_ack_err = 1;
452244961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
452344961713Sgirish 			cs.bits.hdw.dc_fifo_err = 1;
452444961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
452544961713Sgirish 			cs.bits.hdw.rcr_sha_par = 1;
452644961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
452744961713Sgirish 			cs.bits.hdw.rbr_pre_par = 1;
452844961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
452944961713Sgirish 			cs.bits.hdw.rbr_tmout = 1;
453044961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
453144961713Sgirish 			cs.bits.hdw.rsp_cnt_err = 1;
453244961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
453344961713Sgirish 			cs.bits.hdw.byte_en_bus = 1;
453444961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
453544961713Sgirish 			cs.bits.hdw.rsp_dat_err = 1;
453644961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
453744961713Sgirish 			cs.bits.hdw.config_err = 1;
453844961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
453944961713Sgirish 			cs.bits.hdw.rcrincon = 1;
454044961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
454144961713Sgirish 			cs.bits.hdw.rcrfull = 1;
454244961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
454344961713Sgirish 			cs.bits.hdw.rbrfull = 1;
454444961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
454544961713Sgirish 			cs.bits.hdw.rbrlogpage = 1;
454644961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
454744961713Sgirish 			cs.bits.hdw.cfiglogpage = 1;
4548adfcba55Sjoycey #if defined(__i386)
4549adfcba55Sjoycey 		cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
4550adfcba55Sjoycey 				cs.value);
4551adfcba55Sjoycey #else
455244961713Sgirish 		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
455344961713Sgirish 				cs.value);
4554adfcba55Sjoycey #endif
455544961713Sgirish 		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
455644961713Sgirish 			chan, cs.value);
455744961713Sgirish 		break;
455844961713Sgirish 	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
455944961713Sgirish 	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
456044961713Sgirish 	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
456144961713Sgirish 		cdfs.value = 0;
456244961713Sgirish 		if (err_id ==  NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
456344961713Sgirish 			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
456444961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
456544961713Sgirish 			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
456644961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
456744961713Sgirish 			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
4568adfcba55Sjoycey #if defined(__i386)
4569adfcba55Sjoycey 		cmn_err(CE_NOTE,
4570adfcba55Sjoycey 			"!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4571adfcba55Sjoycey 			cdfs.value);
4572adfcba55Sjoycey #else
457344961713Sgirish 		cmn_err(CE_NOTE,
457444961713Sgirish 			"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
457544961713Sgirish 			cdfs.value);
4576adfcba55Sjoycey #endif
457744961713Sgirish 		RXDMA_REG_WRITE64(nxgep->npi_handle,
457844961713Sgirish 			RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value);
457944961713Sgirish 		break;
458044961713Sgirish 	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
458144961713Sgirish 		break;
458253f3d8ecSyc 	case NXGE_FM_EREPORT_RDMC_RCR_ERR:
458344961713Sgirish 		break;
458444961713Sgirish 	}
458544961713Sgirish }
4586