/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>

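/*
 * NXGE_ACTUAL_RDCGRP/NXGE_ACTUAL_RDC (below) map a logical, per-port RDC
 * group or RDC channel number to the physical number used by the hardware
 * by adding the port's starting ids from the partition (pt_config) hardware
 * configuration. Illustrative example (hypothetical values): with
 * start_rdc = 8, logical RDC 2 maps to physical RDC 10.
 */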
#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t	nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to the tunable block size type.
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

static nxge_status_t nxge_map_rxdma(p_nxge_t);
static void nxge_unmap_rxdma(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
static void nxge_rxdma_hw_stop_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t);
static void nxge_rxdma_hw_stop(p_nxge_t);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *,  p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

mblk_t *
nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t,
    p_rx_rcr_ring_t *, rx_dma_ctl_stat_t);

static void nxge_receive_packet(p_nxge_t,
	p_rx_rcr_ring_t,
	p_rcr_entry_t,
	boolean_t *,
	mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static void nxge_rx_pkts_vring(p_nxge_t, uint_t,
    p_nxge_ldv_t, rx_dma_ctl_stat_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t,
				p_nxge_ldv_t, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
				uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);


static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static uint16_t
nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb);

nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

	status = nxge_map_rxdma(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_init_rxdma: status 0x%x", status));
		return (status);
	}

	status = nxge_rxdma_hw_start_common(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxgep);
		return (status);
	}

	status = nxge_rxdma_hw_start(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxgep);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"<== nxge_init_rxdma_channels: status 0x%x", status));

	return (status);
}

void
nxge_uninit_rxdma_channels(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));

	nxge_rxdma_hw_stop(nxgep);
	nxge_rxdma_hw_stop_common(nxgep);
	nxge_unmap_rxdma(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"<== nxge_uninit_rxdma_channels"));
}

nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

void
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
{
	int			i, ndmas;
	uint16_t		channel;
	p_rx_rbr_rings_t 	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	npi_handle_t		handle;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_dump_fzc_regs(handle);

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_regs_dump_channels: "
			"NULL ring pointer"));
		return;
	}
	if (rx_rbr_rings->rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_regs_dump_channels: "
			"NULL rbr rings pointer"));
		return;
	}

	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_regs_dump_channels: no channel"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	for (i = 0; i < ndmas; i++) {
		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
			continue;
		}
		channel = rbr_rings[i]->rdc;
		(void) nxge_dump_rxdma_channel(nxgep, channel);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));

}

nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_dump_rdc_regs(handle, channel);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
	return (status);
}

nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ent_msk_t mask_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_rxdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ctl_stat_t cs_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_rxdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp,
				    uint8_t rdc)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_rdc_grp_t	rdc_grp_p;
	uint8_t actual_rdcgrp, actual_rdc;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
	rdc_grp_p->rdc[0] = rdc;

	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);

	rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp,
							    actual_rdc);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
{
	npi_handle_t		handle;

	uint8_t actual_rdc;
	npi_status_t		rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    " ==> nxge_rxdma_cfg_port_default_rdc"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);


	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    " <== nxge_rxdma_cfg_port_default_rdc"));

	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
				    uint16_t pkts)
{
	npi_status_t	rs = NPI_SUCCESS;
	npi_handle_t	handle;
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    " ==> nxge_rxdma_cfg_rcr_threshold"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
			    uint16_t tout, uint8_t enable)
{
	npi_status_t	rs = NPI_SUCCESS;
	npi_handle_t	handle;
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if (enable == 0) {
		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
	} else {
		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
							    tout);
	}

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
	return (NXGE_OK);
}

nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t		handle;
	rdc_desc_cfg_t 		rdc_desc;
	p_rcrcfig_b_t		cfgb_p;
	npi_status_t		rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the receive ring configurations.
	 */
	rdc_desc.mbox_enable = 1;
	rdc_desc.mbox_addr = mbox_p->mbox_addr;
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
		mbox_p->mbox_addr, rdc_desc.mbox_addr));

	rdc_desc.rbr_len = rbr_p->rbb_max;
	rdc_desc.rbr_addr = rbr_p->rbr_addr;

	switch (nxgep->rx_bksize_code) {
	case RBR_BKSIZE_4K:
		rdc_desc.page_size = SIZE_4KB;
		break;
	case RBR_BKSIZE_8K:
		rdc_desc.page_size = SIZE_8KB;
		break;
	case RBR_BKSIZE_16K:
		rdc_desc.page_size = SIZE_16KB;
		break;
	case RBR_BKSIZE_32K:
		rdc_desc.page_size = SIZE_32KB;
		break;
	}

	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
	rdc_desc.valid0 = 1;

	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
	rdc_desc.valid1 = 1;

	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
	rdc_desc.valid2 = 1;

	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
	rdc_desc.offset = rcr_p->sw_priv_hdr_len;

	rdc_desc.rcr_len = rcr_p->comp_size;
	rdc_desc.rcr_addr = rcr_p->rcr_addr;

	cfgb_p = &(rcr_p->rcr_cfgb);
	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
	rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
	rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
		"rbr_len qlen %d pagesize code %d rcr_len %d",
		rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
		"size 0 %d size 1 %d size 2 %d",
		rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
		rbr_p->npi_pkt_buf_size2));

	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Enable the timeout and threshold.
	 */
	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
			rdc_desc.rcr_threshold);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
			rdc_desc.rcr_timeout);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Enable the DMA */
	rs = npi_rxdma_cfg_rdc_enable(handle, channel);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Kick the DMA engine. */
	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
	/* Clear the rbr empty bit */
	(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));

	return (NXGE_OK);
}

nxge_status_t
nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/* disable the DMA */
	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_disable_rxdma_channel:failed (0x%x)",
			rs));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
{
	npi_handle_t		handle;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_rxdma_channel_rcrflush"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	npi_rxdma_rdc_rcr_flush(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"<== nxge_rxdma_channel_rcrflush"));
	return (status);

}

#define	MID_INDEX(l, r) ((r + l + 1) >> 1)

#define	TO_LEFT -1
#define	TO_RIGHT 1
#define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
#define	BOTH_LEFT (TO_LEFT + TO_LEFT)
#define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
#define	NO_HINT 0xffffffff
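
/*
 * Summary (added for clarity) of how the direction macros above are
 * combined by the binary search in nxge_rxbuf_pp_to_vp():
 *	address within [dvma_addr, dvma_addr + chunk_size)
 *		-> TO_RIGHT + TO_LEFT  == IN_MIDDLE  (chunk found)
 *	address at or beyond the end of the chunk
 *		-> TO_RIGHT + TO_RIGHT == BOTH_RIGHT (search the upper half)
 *	address below the start of the chunk
 *		-> TO_LEFT + TO_LEFT   == BOTH_LEFT  (search the lower half)
 */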

/*ARGSUSED*/
nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
	int			bufsize;
	uint64_t		pktbuf_pp;
	uint64_t 		dvma_addr;
	rxring_info_t 		*ring_info;
	int 			base_side, end_side;
	int 			r_index, l_index, anchor_index;
	int 			found, search_done;
	uint32_t offset, chunk_size, block_size, page_size_mask;
	uint32_t chunk_index, block_index, total_index;
	int 			max_iterations, iteration;
	rxbuf_index_info_t 	*bufinfo;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		"==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
		pkt_buf_addr_pp,
		pktbufsz_type));

	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;

	switch (pktbufsz_type) {
	case 0:
		bufsize = rbr_p->pkt_buf_size0;
		break;
	case 1:
		bufsize = rbr_p->pkt_buf_size1;
		break;
	case 2:
		bufsize = rbr_p->pkt_buf_size2;
		break;
	case RCR_SINGLE_BLOCK:
		bufsize = 0;
		anchor_index = 0;
		break;
	default:
		return (NXGE_ERROR);
	}

	if (rbr_p->num_blocks == 1) {
		anchor_index = 0;
		ring_info = rbr_p->ring_info;
		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			"==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
			"buf_pp $%p btype %d anchor_index %d "
			"bufinfo $%p",
			pkt_buf_addr_pp,
			pktbufsz_type,
			anchor_index,
			bufinfo));

		goto found_index;
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		"==> nxge_rxbuf_pp_to_vp: "
		"buf_pp $%p btype %d  anchor_index %d",
		pkt_buf_addr_pp,
		pktbufsz_type,
		anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen recently.
	 * This is indicated by a hint which is initialized when
	 * the first buffer of the block is seen. The hint is reset
	 * when the last buffer of the block has been processed.
	 * As three buffer sizes are supported, three hints are kept.
	 * The idea behind the hints is that once the hardware uses
	 * a block for a buffer of a given size, it will use that
	 * block exclusively for that size until the block is
	 * exhausted. It is assumed that only a single block is in
	 * use for a given buffer size at any given time.
	 */
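	/*
	 * Illustrative example (hypothetical numbers): if an 8 KB block at
	 * DVMA 0x1000 is carved into 2 KB buffers, the hint for that buffer
	 * size is set when the buffer at 0x1000 is seen, lets the buffers
	 * at 0x1800 and 0x2000 skip the binary search below, and is cleared
	 * again when the last buffer at 0x2800 is processed.
	 */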
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr =  bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
			(pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, reset the hint for this size.
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			"==> nxge_rxbuf_pp_to_vp: (!found)"
			"buf_pp $%p btype %d anchor_index %d",
			pkt_buf_addr_pp,
			pktbufsz_type,
			anchor_index));

		/*
		 * This is the first buffer of the block of this size
		 * that has been seen, so the whole information array
		 * must be searched. The search is a binary search and
		 * assumes that the array is already sorted in
		 * increasing order:
		 * info[0] < info[1] < info[2] .... < info[n-1]
		 * where n is the size of the information array.
		 */
		r_index = rbr_p->num_blocks - 1;
		l_index = 0;
		search_done = B_FALSE;
		anchor_index = MID_INDEX(r_index, l_index);
		while (search_done == B_FALSE) {
			if ((r_index == l_index) ||
				(iteration >= max_iterations))
				search_done = B_TRUE;
			end_side = TO_RIGHT; /* to the right */
			base_side = TO_LEFT; /* to the left */
			/* read the DVMA address information and sort it */
			dvma_addr =  bufinfo[anchor_index].dvma_addr;
			chunk_size = bufinfo[anchor_index].buf_size;
			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
				"==> nxge_rxbuf_pp_to_vp: (searching)"
				"buf_pp $%p btype %d "
				"anchor_index %d chunk_size %d dvmaaddr $%p",
				pkt_buf_addr_pp,
				pktbufsz_type,
				anchor_index,
				chunk_size,
				dvma_addr));

			if (pktbuf_pp >= dvma_addr)
				base_side = TO_RIGHT; /* to the right */
			if (pktbuf_pp < (dvma_addr + chunk_size))
				end_side = TO_LEFT; /* to the left */

			switch (base_side + end_side) {
				case IN_MIDDLE:
					/* found */
					found = B_TRUE;
					search_done = B_TRUE;
					if ((pktbuf_pp + bufsize) <
						(dvma_addr + chunk_size))
						ring_info->hint[pktbufsz_type] =
						bufinfo[anchor_index].buf_index;
					break;
				case BOTH_RIGHT:
						/* not found: go to the right */
					l_index = anchor_index + 1;
					anchor_index =
						MID_INDEX(r_index, l_index);
					break;

				case  BOTH_LEFT:
						/* not found: go to the left */
					r_index = anchor_index - 1;
					anchor_index = MID_INDEX(r_index,
						l_index);
					break;
				default: /* should not come here */
					return (NXGE_ERROR);
			}
			iteration++;
		}

		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			"==> nxge_rxbuf_pp_to_vp: (search done)"
			"buf_pp $%p btype %d anchor_index %d",
			pkt_buf_addr_pp,
			pktbufsz_type,
			anchor_index));
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			"==> nxge_rxbuf_pp_to_vp: (search failed)"
			"buf_pp $%p btype %d anchor_index %d",
			pkt_buf_addr_pp,
			pktbufsz_type,
			anchor_index));
		return (NXGE_ERROR);
	}

found_index:
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		"==> nxge_rxbuf_pp_to_vp: (FOUND1)"
		"buf_pp $%p btype %d bufsize %d anchor_index %d",
		pkt_buf_addr_pp,
		pktbufsz_type,
		bufsize,
		anchor_index));

	/* index of the first block in this chunk */
	chunk_index = bufinfo[anchor_index].start_index;
	dvma_addr =  bufinfo[anchor_index].dvma_addr;
	page_size_mask = ring_info->block_size_mask;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		"==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
		"buf_pp $%p btype %d bufsize %d "
		"anchor_index %d chunk_index %d dvma $%p",
		pkt_buf_addr_pp,
		pktbufsz_type,
		bufsize,
		anchor_index,
		chunk_index,
		dvma_addr));

	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
	block_size = rbr_p->block_size; /* System  block(page) size */

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		"==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
		"buf_pp $%p btype %d bufsize %d "
		"anchor_index %d chunk_index %d dvma $%p "
		"offset %d block_size %d",
		pkt_buf_addr_pp,
		pktbufsz_type,
		bufsize,
		anchor_index,
		chunk_index,
		dvma_addr,
		offset,
		block_size));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));

	block_index = (offset / block_size); /* index within chunk */
	total_index = chunk_index + block_index;


	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		"==> nxge_rxbuf_pp_to_vp: "
		"total_index %d dvma_addr $%p "
		"offset %d block_size %d "
		"block_index %d ",
		total_index, dvma_addr,
		offset, block_size,
		block_index));

	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr
				+ offset);

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		"==> nxge_rxbuf_pp_to_vp: "
		"total_index %d dvma_addr $%p "
		"offset %d block_size %d "
		"block_index %d "
		"*pkt_buf_addr_p $%p",
		total_index, dvma_addr,
		offset, block_size,
		block_index,
		*pkt_buf_addr_p));


	*msg_index = total_index;
	*bufoffset =  (offset & page_size_mask);

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		"==> nxge_rxbuf_pp_to_vp: get msg index: "
		"msg_index %d bufoffset_index %d",
		*msg_index,
		*bufoffset));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));

	return (NXGE_OK);
}

/*
 * Comparison function used by the sort routine (nxge_ksort below)
 * to order buffer chunks by DVMA address.
 */
static int
nxge_sort_compare(const void *p1, const void *p2)
{

	rxbuf_index_info_t *a, *b;

	a = (rxbuf_index_info_t *)p1;
	b = (rxbuf_index_info_t *)p2;

	if (a->dvma_addr > b->dvma_addr)
		return (1);
	if (a->dvma_addr < b->dvma_addr)
		return (-1);
	return (0);
}



/*
 * grabbed this sort implementation from common/syscall/avl.c
 *
 */
/*
 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
 * v = ptr to array/vector of objs
 * n = # objs in the array
 * s = size of each obj (must be a multiple of the word size)
 * f = ptr to function to compare two objs
 *	returns (-1 = less than, 0 = equal, 1 = greater than)
 */
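/*
 * Example usage (as done in nxge_rxbuf_index_info_init() below): sort the
 * per-chunk rxbuf_index_info_t records by DVMA address so that
 * nxge_rxbuf_pp_to_vp() can binary-search them:
 *
 *	nxge_ksort((void *)ring_info->buffer, rbrp->num_blocks,
 *		sizeof (rxbuf_index_info_t), nxge_sort_compare);
 *
 * The element size must be word-aligned because the swap loop below
 * exchanges the objects one 32-bit word at a time.
 */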
void
nxge_ksort(caddr_t v, int n, int s, int (*f)())
{
	int g, i, j, ii;
	unsigned int *p1, *p2;
	unsigned int tmp;

	/* No work to do */
	if (v == NULL || n <= 1)
		return;
	/* Sanity check on arguments */
	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
	ASSERT(s > 0);

	for (g = n / 2; g > 0; g /= 2) {
		for (i = g; i < n; i++) {
			for (j = i - g; j >= 0 &&
				(*f)(v + j * s, v + (j + g) * s) == 1;
					j -= g) {
				p1 = (unsigned *)(v + j * s);
				p2 = (unsigned *)(v + (j + g) * s);
				for (ii = 0; ii < s / 4; ii++) {
					tmp = *p1;
					*p1++ = *p2;
					*p2++ = tmp;
				}
			}
		}
	}
}

/*
 * Initialize data structures required for rxdma
 * buffer dvma->vmem address lookup
 */
/*ARGSUSED*/
static nxge_status_t
nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
{

	int index;
	rxring_info_t *ring_info;
	int max_iteration = 0, max_index = 0;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));

	ring_info = rbrp->ring_info;
	ring_info->hint[0] = NO_HINT;
	ring_info->hint[1] = NO_HINT;
	ring_info->hint[2] = NO_HINT;
	max_index = rbrp->num_blocks;

		/* read the DVMA address information and sort it */
		/* do init of the information array */


	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
		" nxge_rxbuf_index_info_init Sort ptrs"));

		/* sort the array */
	nxge_ksort((void *)ring_info->buffer, max_index,
		sizeof (rxbuf_index_info_t), nxge_sort_compare);



	for (index = 0; index < max_index; index++) {
		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
			" nxge_rxbuf_index_info_init: sorted chunk %d "
			" ioaddr $%p kaddr $%p size %x",
			index, ring_info->buffer[index].dvma_addr,
			ring_info->buffer[index].kaddr,
			ring_info->buffer[index].buf_size));
	}

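	/*
	 * Compute the maximum number of binary search iterations needed to
	 * cover max_index sorted chunks (roughly log2(max_index), plus one
	 * extra step as a safety margin). Illustrative example: with 8
	 * chunks the loop below stops at max_iteration = 4, so
	 * max_iterations is set to 5.
	 */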
	max_iteration = 0;
	while (max_index >= (1ULL << max_iteration))
		max_iteration++;
	ring_info->max_iterations = max_iteration + 1;
	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
		" nxge_rxbuf_index_info_init Find max iter %d",
					ring_info->max_iterations));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
	return (NXGE_OK);
}

/* ARGSUSED */
void
nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
{
#ifdef	NXGE_DEBUG

	uint32_t bptr;
	uint64_t pp;

	bptr = entry_p->bits.hdw.pkt_buf_addr;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"\trcr entry $%p "
		"\trcr entry 0x%0llx "
		"\trcr entry 0x%08x "
		"\trcr entry 0x%08x "
		"\tvalue 0x%0llx\n"
		"\tmulti = %d\n"
		"\tpkt_type = 0x%x\n"
		"\tzero_copy = %d\n"
		"\tnoport = %d\n"
		"\tpromis = %d\n"
		"\terror = 0x%04x\n"
		"\tdcf_err = 0x%01x\n"
		"\tl2_len = %d\n"
		"\tpktbufsize = %d\n"
		"\tpkt_buf_addr = $%p\n"
		"\tpkt_buf_addr (<< 6) = $%p\n",
		entry_p,
		*(int64_t *)entry_p,
		*(int32_t *)entry_p,
		*(int32_t *)((char *)entry_p + 32),
		entry_p->value,
		entry_p->bits.hdw.multi,
		entry_p->bits.hdw.pkt_type,
		entry_p->bits.hdw.zero_copy,
		entry_p->bits.hdw.noport,
		entry_p->bits.hdw.promis,
		entry_p->bits.hdw.error,
		entry_p->bits.hdw.dcf_err,
		entry_p->bits.hdw.l2_len,
		entry_p->bits.hdw.pktbufsz,
		bptr,
		entry_p->bits.ldw.pkt_buf_addr));

	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
		RCR_PKT_BUF_ADDR_SHIFT;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
		pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}

void
nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
{
	npi_handle_t		handle;
	rbr_stat_t 		rbr_stat;
	addr44_t 		hd_addr;
	addr44_t 		tail_addr;
	uint16_t 		qlen;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"==> nxge_rxdma_regs_dump: rdc channel %d", rdc));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/* RBR head */
	hd_addr.addr = 0;
	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
		(void *)hd_addr.addr);

	/* RBR stats */
	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);

	/* RCR tail */
	tail_addr.addr = 0;
	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
		(void *)tail_addr.addr);

	/* RCR qlen */
	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
}

void
nxge_rxdma_stop(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_rx_mac_disable(nxgep);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
}

void
nxge_rxdma_stop_reinit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));

	(void) nxge_rxdma_stop(nxgep);
	(void) nxge_uninit_rxdma_channels(nxgep);
	(void) nxge_init_rxdma_channels(nxgep);

#ifndef	AXIS_DEBUG_LB
	(void) nxge_xcvr_init(nxgep);
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
#endif
	(void) nxge_rx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
}

nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
	int			i, ndmas;
	uint16_t		channel;
	p_rx_rbr_rings_t 	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"==> nxge_rxdma_hw_mode: mode %d", enable));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_mode: not initialized"));
		return (NXGE_ERROR);
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_mode: NULL ring pointer"));
		return (NXGE_ERROR);
	}
	if (rx_rbr_rings->rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_mode: NULL rbr rings pointer"));
		return (NXGE_ERROR);
	}

	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_mode: no channel"));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"==> nxge_rxdma_mode (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	for (i = 0; i < ndmas; i++) {
		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
			continue;
		}
		channel = rbr_rings[i]->rdc;
		if (enable) {
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
				"==> nxge_rxdma_hw_mode: channel %d (enable)",
				channel));
			rs = npi_rxdma_cfg_rdc_enable(handle, channel);
		} else {
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
				"==> nxge_rxdma_hw_mode: channel %d (disable)",
				channel));
			rs = npi_rxdma_cfg_rdc_disable(handle, channel);
		}
	}

	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"<== nxge_rxdma_hw_mode: status 0x%x", status));

	return (status);
}

void
nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_rxdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_cfg_rdc_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
}

void
nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_rxdma_disable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_cfg_rdc_disable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
}

void
nxge_hw_start_rx(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));

	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
}

/*ARGSUSED*/
void
nxge_fixup_rxdma_rings(p_nxge_t nxgep)
{
	int			i, ndmas;
	uint16_t		rdc;
	p_rx_rbr_rings_t 	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t 	rx_rcr_rings;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
		return;
	}
	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_fixup_rxdma_rings: no channel"));
		return;
	}

	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
		return;
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas));

	nxge_rxdma_hw_stop(nxgep);

	rbr_rings = rx_rbr_rings->rbr_rings;
	for (i = 0; i < ndmas; i++) {
		rdc = rbr_rings[i]->rdc;
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"==> nxge_fixup_rxdma_rings: channel %d "
			"ring $%px", rdc, rbr_rings[i]));
		(void) nxge_rxdma_fixup_channel(nxgep, rdc, i);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}

void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	int		i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
	i = nxge_rxdma_get_ring_index(nxgep, channel);
	if (i < 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_fix_channel: no entry found"));
		return;
	}

	nxge_rxdma_fixup_channel(nxgep, channel, i);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
}

void
nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
{
	int			ndmas;
	p_rx_rbr_rings_t 	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t 	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t 	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p;
	p_nxge_dma_pool_t	dma_buf_poolp;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_rx_rbr_ring_t 	rbrp;
	p_rx_rcr_ring_t 	rcrp;
	p_rx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

	(void) nxge_rxdma_stop_channel(nxgep, channel);

	dma_buf_poolp = nxgep->rx_buf_pool_p;
	dma_cntl_poolp = nxgep->rx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			"<== nxge_rxdma_fixup_channel: buf not allocated"));
		return;
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			"<== nxge_rxdma_fixup_channel: no dma allocated"));
		return;
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

	/* Reinitialize the receive block and completion rings */
130344961713Sgirish 	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
130444961713Sgirish 	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
130544961713Sgirish 	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];
130644961713Sgirish 
130744961713Sgirish 
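	/*
	 * Reset the software ring indices so the reinitialized channel
	 * restarts from a clean state; the completion ring descriptors
	 * themselves are zeroed just below.
	 */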
130844961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
130944961713Sgirish 	rbrp->rbr_rd_index = 0;
131044961713Sgirish 	rcrp->comp_rd_index = 0;
131144961713Sgirish 	rcrp->comp_wt_index = 0;
131244961713Sgirish 
131344961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
131444961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
131544961713Sgirish 
131644961713Sgirish 	status = nxge_rxdma_start_channel(nxgep, channel,
131744961713Sgirish 			rbrp, rcrp, mboxp);
131844961713Sgirish 	if (status != NXGE_OK) {
131944961713Sgirish 		goto nxge_rxdma_fixup_channel_fail;
132044961713Sgirish 	}
132144961713Sgirish 
132244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
132344961713Sgirish 	return;
132444961713Sgirish 
132544961713Sgirish nxge_rxdma_fixup_channel_fail:
132644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
132744961713Sgirish 		"==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
132844961713Sgirish 
132944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
133044961713Sgirish }
133144961713Sgirish 
133244961713Sgirish int
133344961713Sgirish nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
133444961713Sgirish {
133544961713Sgirish 	int			i, ndmas;
133644961713Sgirish 	uint16_t		rdc;
133744961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
133844961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
133944961713Sgirish 
134044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
134144961713Sgirish 		"==> nxge_rxdma_get_ring_index: channel %d", channel));
134244961713Sgirish 
134344961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
134444961713Sgirish 	if (rx_rbr_rings == NULL) {
134544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
134644961713Sgirish 			"<== nxge_rxdma_get_ring_index: NULL ring pointer"));
134744961713Sgirish 		return (-1);
134844961713Sgirish 	}
134944961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
135044961713Sgirish 	if (!ndmas) {
135144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
135244961713Sgirish 			"<== nxge_rxdma_get_ring_index: no channel"));
135344961713Sgirish 		return (-1);
135444961713Sgirish 	}
135544961713Sgirish 
135644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
135744961713Sgirish 		"==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));
135844961713Sgirish 
135944961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
136044961713Sgirish 	for (i = 0; i < ndmas; i++) {
136144961713Sgirish 		rdc = rbr_rings[i]->rdc;
136244961713Sgirish 		if (channel == rdc) {
136344961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
136444961713Sgirish 				"==> nxge_rxdma_get_ring_index: "
136544961713Sgirish 				"channel %d (index %d) "
136644961713Sgirish 				"ring $%p", channel, i,
136744961713Sgirish 				rbr_rings[i]));
136844961713Sgirish 			return (i);
136944961713Sgirish 		}
137044961713Sgirish 	}
137144961713Sgirish 
137244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
137344961713Sgirish 		"<== nxge_rxdma_get_ring_index: not found"));
137444961713Sgirish 
137544961713Sgirish 	return (-1);
137644961713Sgirish }
137744961713Sgirish 
137844961713Sgirish p_rx_rbr_ring_t
137944961713Sgirish nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
138044961713Sgirish {
138144961713Sgirish 	int			i, ndmas;
138244961713Sgirish 	uint16_t		rdc;
138344961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
138444961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
138544961713Sgirish 
138644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
138744961713Sgirish 		"==> nxge_rxdma_get_rbr_ring: channel %d", channel));
138844961713Sgirish 
138944961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
139044961713Sgirish 	if (rx_rbr_rings == NULL) {
139144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
139244961713Sgirish 			"<== nxge_rxdma_get_rbr_ring: NULL ring pointer"));
139344961713Sgirish 		return (NULL);
139444961713Sgirish 	}
139544961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
139644961713Sgirish 	if (!ndmas) {
139744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
139844961713Sgirish 			"<== nxge_rxdma_get_rbr_ring: no channel"));
139944961713Sgirish 		return (NULL);
140044961713Sgirish 	}
140144961713Sgirish 
140244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
140344961713Sgirish 		"==> nxge_rxdma_get_rbr_ring (ndmas %d)", ndmas));
140444961713Sgirish 
140544961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
140644961713Sgirish 	for (i = 0; i < ndmas; i++) {
140744961713Sgirish 		rdc = rbr_rings[i]->rdc;
140844961713Sgirish 		if (channel == rdc) {
140944961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
141044961713Sgirish 				"==> nxge_rxdma_get_rbr_ring: channel %d "
141144961713Sgirish 				"ring $%p", channel, rbr_rings[i]));
141244961713Sgirish 			return (rbr_rings[i]);
141344961713Sgirish 		}
141444961713Sgirish 	}
141544961713Sgirish 
141644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
141744961713Sgirish 		"<== nxge_rxdma_get_rbr_ring: not found"));
141844961713Sgirish 
141944961713Sgirish 	return (NULL);
142044961713Sgirish }
142144961713Sgirish 
142244961713Sgirish p_rx_rcr_ring_t
142344961713Sgirish nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
142444961713Sgirish {
142544961713Sgirish 	int			i, ndmas;
142644961713Sgirish 	uint16_t		rdc;
142744961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
142844961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
142944961713Sgirish 
143044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
143144961713Sgirish 		"==> nxge_rxdma_get_rcr_ring: channel %d", channel));
143244961713Sgirish 
143344961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
143444961713Sgirish 	if (rx_rcr_rings == NULL) {
143544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
143644961713Sgirish 			"<== nxge_rxdma_get_rcr_ring: NULL ring pointer"));
143744961713Sgirish 		return (NULL);
143844961713Sgirish 	}
143944961713Sgirish 	ndmas = rx_rcr_rings->ndmas;
144044961713Sgirish 	if (!ndmas) {
144144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
144244961713Sgirish 			"<== nxge_rxdma_get_rcr_ring: no channel"));
144344961713Sgirish 		return (NULL);
144444961713Sgirish 	}
144544961713Sgirish 
144644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
144744961713Sgirish 		"==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas));
144844961713Sgirish 
144944961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
145044961713Sgirish 	for (i = 0; i < ndmas; i++) {
145144961713Sgirish 		rdc = rcr_rings[i]->rdc;
145244961713Sgirish 		if (channel == rdc) {
145344961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
145444961713Sgirish 				"==> nxge_rxdma_get_rcr_ring: channel %d "
145544961713Sgirish 				"ring $%p", channel, rcr_rings[i]));
145644961713Sgirish 			return (rcr_rings[i]);
145744961713Sgirish 		}
145844961713Sgirish 	}
145944961713Sgirish 
146044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
146144961713Sgirish 		"<== nxge_rxdma_get_rcr_ring: not found"));
146244961713Sgirish 
146344961713Sgirish 	return (NULL);
146444961713Sgirish }
146544961713Sgirish 
146644961713Sgirish /*
146744961713Sgirish  * Static functions start here.
146844961713Sgirish  */
146944961713Sgirish static p_rx_msg_t
147044961713Sgirish nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
147144961713Sgirish {
147244961713Sgirish 	p_rx_msg_t nxge_mp 		= NULL;
147344961713Sgirish 	p_nxge_dma_common_t		dmamsg_p;
147444961713Sgirish 	uchar_t 			*buffer;
147544961713Sgirish 
147644961713Sgirish 	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
147744961713Sgirish 	if (nxge_mp == NULL) {
147856d930aeSspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
147944961713Sgirish 			"Allocation of a rx msg failed."));
148044961713Sgirish 		goto nxge_allocb_exit;
148144961713Sgirish 	}
148244961713Sgirish 
148344961713Sgirish 	nxge_mp->use_buf_pool = B_FALSE;
148444961713Sgirish 	if (dmabuf_p) {
148544961713Sgirish 		nxge_mp->use_buf_pool = B_TRUE;
148644961713Sgirish 		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
148744961713Sgirish 		*dmamsg_p = *dmabuf_p;
148844961713Sgirish 		dmamsg_p->nblocks = 1;
148944961713Sgirish 		dmamsg_p->block_size = size;
149044961713Sgirish 		dmamsg_p->alength = size;
149144961713Sgirish 		buffer = (uchar_t *)dmabuf_p->kaddrp;
149244961713Sgirish 
149344961713Sgirish 		dmabuf_p->kaddrp = (void *)
149444961713Sgirish 				((char *)dmabuf_p->kaddrp + size);
149544961713Sgirish 		dmabuf_p->ioaddr_pp = (void *)
149644961713Sgirish 				((char *)dmabuf_p->ioaddr_pp + size);
149744961713Sgirish 		dmabuf_p->alength -= size;
149844961713Sgirish 		dmabuf_p->offset += size;
149944961713Sgirish 		dmabuf_p->dma_cookie.dmac_laddress += size;
150044961713Sgirish 		dmabuf_p->dma_cookie.dmac_size -= size;
150144961713Sgirish 
150244961713Sgirish 	} else {
150344961713Sgirish 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
150444961713Sgirish 		if (buffer == NULL) {
150556d930aeSspeer 			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
150644961713Sgirish 				"Allocation of a receive page failed."));
150744961713Sgirish 			goto nxge_allocb_fail1;
150844961713Sgirish 		}
150944961713Sgirish 	}
151044961713Sgirish 
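	/*
	 * Wrap the receive buffer in an mblk with desballoc(9F): when the
	 * upper layer frees the mblk, the free routine registered below
	 * (nxge_freeb) runs instead of the buffer being freed by STREAMS,
	 * so the block can be reposted or released by the driver.
	 */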
151144961713Sgirish 	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
151244961713Sgirish 	if (nxge_mp->rx_mblk_p == NULL) {
151356d930aeSspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
151444961713Sgirish 		goto nxge_allocb_fail2;
151544961713Sgirish 	}
151644961713Sgirish 
151744961713Sgirish 	nxge_mp->buffer = buffer;
151844961713Sgirish 	nxge_mp->block_size = size;
151944961713Sgirish 	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
152044961713Sgirish 	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
152144961713Sgirish 	nxge_mp->ref_cnt = 1;
152244961713Sgirish 	nxge_mp->free = B_TRUE;
152344961713Sgirish 	nxge_mp->rx_use_bcopy = B_FALSE;
152444961713Sgirish 
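	/* Count this block among the driver's outstanding receive mblks. */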
152514ea4bb7Ssd 	atomic_inc_32(&nxge_mblks_pending);
152644961713Sgirish 
152744961713Sgirish 	goto nxge_allocb_exit;
152844961713Sgirish 
152944961713Sgirish nxge_allocb_fail2:
153044961713Sgirish 	if (!nxge_mp->use_buf_pool) {
153144961713Sgirish 		KMEM_FREE(buffer, size);
153244961713Sgirish 	}
153344961713Sgirish 
153444961713Sgirish nxge_allocb_fail1:
153544961713Sgirish 	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
153644961713Sgirish 	nxge_mp = NULL;
153744961713Sgirish 
153844961713Sgirish nxge_allocb_exit:
153944961713Sgirish 	return (nxge_mp);
154044961713Sgirish }
154144961713Sgirish 
154244961713Sgirish p_mblk_t
154344961713Sgirish nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
154444961713Sgirish {
154544961713Sgirish 	p_mblk_t mp;
154644961713Sgirish 
154744961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
154844961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
154944961713Sgirish 		"offset = 0x%08X "
155044961713Sgirish 		"size = 0x%08X",
155144961713Sgirish 		nxge_mp, offset, size));
155244961713Sgirish 
155344961713Sgirish 	mp = desballoc(&nxge_mp->buffer[offset], size,
155444961713Sgirish 				0, &nxge_mp->freeb);
155544961713Sgirish 	if (mp == NULL) {
155644961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
155744961713Sgirish 		goto nxge_dupb_exit;
155844961713Sgirish 	}
155944961713Sgirish 	atomic_inc_32(&nxge_mp->ref_cnt);
156014ea4bb7Ssd 	atomic_inc_32(&nxge_mblks_pending);
156144961713Sgirish 
156244961713Sgirish 
156344961713Sgirish nxge_dupb_exit:
156444961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
156544961713Sgirish 		nxge_mp));
156644961713Sgirish 	return (mp);
156744961713Sgirish }
156844961713Sgirish 
156944961713Sgirish p_mblk_t
157044961713Sgirish nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
157144961713Sgirish {
157244961713Sgirish 	p_mblk_t mp;
157344961713Sgirish 	uchar_t *dp;
157444961713Sgirish 
157544961713Sgirish 	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
157644961713Sgirish 	if (mp == NULL) {
157744961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
157844961713Sgirish 		goto nxge_dupb_bcopy_exit;
157944961713Sgirish 	}
158044961713Sgirish 	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
158144961713Sgirish 	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
158244961713Sgirish 	mp->b_wptr = dp + size;
158344961713Sgirish 
158444961713Sgirish nxge_dupb_bcopy_exit:
158544961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
158644961713Sgirish 		nxge_mp));
158744961713Sgirish 	return (mp);
158844961713Sgirish }
158944961713Sgirish 
159044961713Sgirish void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
159144961713Sgirish 	p_rx_msg_t rx_msg_p);
159244961713Sgirish 
159344961713Sgirish void
159444961713Sgirish nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
159544961713Sgirish {
159644961713Sgirish 
159744961713Sgirish 	npi_handle_t		handle;
159844961713Sgirish 
159944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
160044961713Sgirish 
160144961713Sgirish 	/* Reuse this buffer */
160244961713Sgirish 	rx_msg_p->free = B_FALSE;
160344961713Sgirish 	rx_msg_p->cur_usage_cnt = 0;
160444961713Sgirish 	rx_msg_p->max_usage_cnt = 0;
160544961713Sgirish 	rx_msg_p->pkt_buf_size = 0;
160644961713Sgirish 
160744961713Sgirish 	if (rx_rbr_p->rbr_use_bcopy) {
160844961713Sgirish 		rx_msg_p->rx_use_bcopy = B_FALSE;
160944961713Sgirish 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
161044961713Sgirish 	}
161144961713Sgirish 
161244961713Sgirish 	/*
161344961713Sgirish 	 * Advance the RBR write index (with wrap-around), post the
161444961713Sgirish 	 * buffer's shifted DMA address into the descriptor ring, and
161444961713Sgirish 	 * kick the channel so the hardware sees the new buffer.
161444961713Sgirish 	 */
161544961713Sgirish 	MUTEX_ENTER(&rx_rbr_p->post_lock);
161644961713Sgirish 
161744961713Sgirish 
161844961713Sgirish 	rx_rbr_p->rbr_wr_index =  ((rx_rbr_p->rbr_wr_index + 1) &
161944961713Sgirish 					    rx_rbr_p->rbr_wrap_mask);
162044961713Sgirish 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
162144961713Sgirish 	MUTEX_EXIT(&rx_rbr_p->post_lock);
162244961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
162344961713Sgirish 	npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1);
162444961713Sgirish 
162544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
162644961713Sgirish 		"<== nxge_post_page (channel %d post_next_index %d)",
162744961713Sgirish 		rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
162844961713Sgirish 
162944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
163044961713Sgirish }
163144961713Sgirish 
163244961713Sgirish void
163344961713Sgirish nxge_freeb(p_rx_msg_t rx_msg_p)
163444961713Sgirish {
163544961713Sgirish 	size_t size;
163644961713Sgirish 	uchar_t *buffer = NULL;
163744961713Sgirish 	int ref_cnt;
1638958cea9eSml 	boolean_t free_state = B_FALSE;
163944961713Sgirish 
164044961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
164144961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
164244961713Sgirish 		"nxge_freeb:rx_msg_p = $%p (block pending %d)",
164344961713Sgirish 		rx_msg_p, nxge_mblks_pending));
164444961713Sgirish 
164514ea4bb7Ssd 	atomic_dec_32(&nxge_mblks_pending);
1646958cea9eSml 	/*
1647958cea9eSml 	 * First read the free state, then atomically decrement the
1648958cea9eSml 	 * reference count, to avoid a race with the interrupt thread
1649958cea9eSml 	 * that is processing a loaned-up buffer block.
1650958cea9eSml 	 */
1652958cea9eSml 	free_state = rx_msg_p->free;
1653958cea9eSml 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
165444961713Sgirish 	if (!ref_cnt) {
165544961713Sgirish 		buffer = rx_msg_p->buffer;
165644961713Sgirish 		size = rx_msg_p->block_size;
165744961713Sgirish 		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
165844961713Sgirish 			"will free: rx_msg_p = $%p (block pending %d)",
165956d930aeSspeer 			rx_msg_p, nxge_mblks_pending));
166044961713Sgirish 
166144961713Sgirish 		if (!rx_msg_p->use_buf_pool) {
166244961713Sgirish 			KMEM_FREE(buffer, size);
166344961713Sgirish 		}
166414ea4bb7Ssd 
166514ea4bb7Ssd 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
166614ea4bb7Ssd 		return;
166744961713Sgirish 	}
166844961713Sgirish 
166944961713Sgirish 	/*
167044961713Sgirish 	 * Repost the buffer to the RBR if the block has been marked
167144961713Sgirish 	 * free and only the driver's own reference remains (ref_cnt
167144961713Sgirish 	 * == 1), i.e. all loaned-up copies have now been freed.
167144961713Sgirish 	 */
1672958cea9eSml 	if (free_state && (ref_cnt == 1)) {
167344961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL,
167444961713Sgirish 		    "nxge_freeb: post page $%p:", rx_msg_p));
167544961713Sgirish 		nxge_post_page(rx_msg_p->nxgep, rx_msg_p->rx_rbr_p,
167644961713Sgirish 		    rx_msg_p);
167744961713Sgirish 	}
167844961713Sgirish 
167944961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
168044961713Sgirish }
168144961713Sgirish 
168244961713Sgirish uint_t
168344961713Sgirish nxge_rx_intr(void *arg1, void *arg2)
168444961713Sgirish {
168544961713Sgirish 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
168644961713Sgirish 	p_nxge_t		nxgep = (p_nxge_t)arg2;
168744961713Sgirish 	p_nxge_ldg_t		ldgp;
168844961713Sgirish 	uint8_t			channel;
168944961713Sgirish 	npi_handle_t		handle;
169044961713Sgirish 	rx_dma_ctl_stat_t	cs;
169144961713Sgirish 
169244961713Sgirish #ifdef	NXGE_DEBUG
169344961713Sgirish 	rxdma_cfig1_t		cfg;
169444961713Sgirish #endif
169544961713Sgirish 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
169644961713Sgirish 
169744961713Sgirish 	if (ldvp == NULL) {
169844961713Sgirish 		NXGE_DEBUG_MSG((NULL, INT_CTL,
169944961713Sgirish 			"<== nxge_rx_intr: arg2 $%p arg1 $%p",
170044961713Sgirish 			nxgep, ldvp));
170144961713Sgirish 
170244961713Sgirish 		return (DDI_INTR_CLAIMED);
170344961713Sgirish 	}
170444961713Sgirish 
170544961713Sgirish 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
170644961713Sgirish 		nxgep = ldvp->nxgep;
170744961713Sgirish 	}
170844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
170944961713Sgirish 		"==> nxge_rx_intr: arg2 $%p arg1 $%p",
171044961713Sgirish 		nxgep, ldvp));
171144961713Sgirish 
171244961713Sgirish 	/*
171344961713Sgirish 	 * This interrupt handler is for a specific
171444961713Sgirish 	 * receive dma channel.
171544961713Sgirish 	 */
171644961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
171744961713Sgirish 	/*
171844961713Sgirish 	 * Get the control and status for this channel.
171944961713Sgirish 	 */
172044961713Sgirish 	channel = ldvp->channel;
172144961713Sgirish 	ldgp = ldvp->ldgp;
172244961713Sgirish 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
172344961713Sgirish 
172444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
172544961713Sgirish 		"cs 0x%016llx rcrto 0x%x rcrthres %x",
172644961713Sgirish 		channel,
172744961713Sgirish 		cs.value,
172844961713Sgirish 		cs.bits.hdw.rcrto,
172944961713Sgirish 		cs.bits.hdw.rcrthres));
173044961713Sgirish 
173144961713Sgirish 	nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs);
173244961713Sgirish 	serviced = DDI_INTR_CLAIMED;
173344961713Sgirish 
173444961713Sgirish 	/* error events. */
173544961713Sgirish 	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
173644961713Sgirish 		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
173744961713Sgirish 	}
173844961713Sgirish 
173944961713Sgirish nxge_intr_exit:
174044961713Sgirish 
174144961713Sgirish 
174244961713Sgirish 	/*
174344961713Sgirish 	 * Enable the mailbox update interrupt if we want to use the
174444961713Sgirish 	 * mailbox.  We probably don't need it, since it only saves us
174544961713Sgirish 	 * one PIO read.  Also write 1 to the rcrthres and rcrto bits
174644961713Sgirish 	 * to clear these two edge-triggered (write-1-to-clear) bits.
174744961713Sgirish 	 */
174944961713Sgirish 
175044961713Sgirish 	cs.value &= RX_DMA_CTL_STAT_WR1C;
175144961713Sgirish 	cs.bits.hdw.mex = 1;
175244961713Sgirish 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
175344961713Sgirish 			cs.value);
175444961713Sgirish 
175544961713Sgirish 	/*
175644961713Sgirish 	 * Rearm this logical group if this is a single device
175744961713Sgirish 	 * group.
175844961713Sgirish 	 */
175944961713Sgirish 	if (ldgp->nldvs == 1) {
176044961713Sgirish 		ldgimgm_t		mgm;
176144961713Sgirish 		mgm.value = 0;
176244961713Sgirish 		mgm.bits.ldw.arm = 1;
176344961713Sgirish 		mgm.bits.ldw.timer = ldgp->ldg_timer;
176444961713Sgirish 		NXGE_REG_WR64(handle,
176544961713Sgirish 			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
176644961713Sgirish 			    mgm.value);
176744961713Sgirish 	}
176844961713Sgirish 
176944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d",
177044961713Sgirish 		serviced));
177144961713Sgirish 	return (serviced);
177244961713Sgirish }
177344961713Sgirish 
177444961713Sgirish /*
177544961713Sgirish  * Process the packets received in the specified logical device
177644961713Sgirish  * and pass up a chain of message blocks to the upper layer.
177744961713Sgirish  */
177844961713Sgirish static void
177944961713Sgirish nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
178014ea4bb7Ssd 				    rx_dma_ctl_stat_t cs)
178144961713Sgirish {
178244961713Sgirish 	p_mblk_t		mp;
178344961713Sgirish 	p_rx_rcr_ring_t		rcrp;
178444961713Sgirish 
178544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
178644961713Sgirish 	if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
178744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
178844961713Sgirish 			"<== nxge_rx_pkts_vring: no mp"));
178944961713Sgirish 		return;
179044961713Sgirish 	}
179144961713Sgirish 
179244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
179344961713Sgirish 		mp));
179444961713Sgirish 
179544961713Sgirish #ifdef  NXGE_DEBUG
179644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
179744961713Sgirish 			"==> nxge_rx_pkts_vring:calling mac_rx "
179814ea4bb7Ssd 			"LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
179944961713Sgirish 			"mac_handle $%p",
180014ea4bb7Ssd 			mp->b_wptr - mp->b_rptr,
180114ea4bb7Ssd 			mp, mp->b_cont, mp->b_next,
180244961713Sgirish 			rcrp, rcrp->rcr_mac_handle));
180344961713Sgirish 
180444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
180544961713Sgirish 			"==> nxge_rx_pkts_vring: dump packets "
180644961713Sgirish 			"(mp $%p b_rptr $%p b_wptr $%p):\n %s",
180744961713Sgirish 			mp,
180844961713Sgirish 			mp->b_rptr,
180944961713Sgirish 			mp->b_wptr,
181014ea4bb7Ssd 			nxge_dump_packet((char *)mp->b_rptr,
181114ea4bb7Ssd 			mp->b_wptr - mp->b_rptr)));
181214ea4bb7Ssd 		if (mp->b_cont) {
181314ea4bb7Ssd 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
181414ea4bb7Ssd 				"==> nxge_rx_pkts_vring: dump b_cont packets "
181514ea4bb7Ssd 				"(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
181614ea4bb7Ssd 				mp->b_cont,
181714ea4bb7Ssd 				mp->b_cont->b_rptr,
181814ea4bb7Ssd 				mp->b_cont->b_wptr,
181914ea4bb7Ssd 				nxge_dump_packet((char *)mp->b_cont->b_rptr,
182014ea4bb7Ssd 				mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
182114ea4bb7Ssd 		}
182244961713Sgirish 		if (mp->b_next) {
182344961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
182444961713Sgirish 				"==> nxge_rx_pkts_vring: dump next packets "
182544961713Sgirish 				"(b_rptr $%p): %s",
182644961713Sgirish 				mp->b_next->b_rptr,
182744961713Sgirish 				nxge_dump_packet((char *)mp->b_next->b_rptr,
182814ea4bb7Ssd 				mp->b_next->b_wptr - mp->b_next->b_rptr)));
182944961713Sgirish 		}
183044961713Sgirish #endif
183144961713Sgirish 
183244961713Sgirish 	mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
183344961713Sgirish }
183444961713Sgirish 
183544961713Sgirish 
183644961713Sgirish /*
183744961713Sgirish  * This routine is the main packet receive processing function.
183844961713Sgirish  * It gets the packet type, error code, and buffer-related
183944961713Sgirish  * information from the receive completion entry.
184044961713Sgirish  * How many completion entries to process is based on the number
184144961713Sgirish  * of packets queued by the hardware, a hardware-maintained tail
184244961713Sgirish  * pointer, and a configurable receive packet count.
184344961713Sgirish  *
184444961713Sgirish  * A chain of message blocks is created as a result of processing
184544961713Sgirish  * the completion entries.  This chain is returned, and the hardware
184644961713Sgirish  * control status register is updated with the number of packets
184744961713Sgirish  * that were removed from the hardware queue.
184844961713Sgirish  */
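/*
 * Shape of the chain handed back to the caller (derived from the
 * chaining logic below): complete frames are linked via b_next, and
 * the buffers of a multi-buffer frame hang off that frame's head
 * mblk via b_cont:
 *
 *	head_mp -> frame1 -> frame2 -> ...		(b_next)
 *		      |
 *		      +-> buf2 -> buf3 -> ...		(b_cont)
 */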
185044961713Sgirish mblk_t *
185144961713Sgirish nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
185244961713Sgirish     p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs)
185344961713Sgirish {
185444961713Sgirish 	npi_handle_t		handle;
185544961713Sgirish 	uint8_t			channel;
185644961713Sgirish 	p_rx_rcr_rings_t	rx_rcr_rings;
185744961713Sgirish 	p_rx_rcr_ring_t		rcr_p;
185844961713Sgirish 	uint32_t		comp_rd_index;
185944961713Sgirish 	p_rcr_entry_t		rcr_desc_rd_head_p;
186044961713Sgirish 	p_rcr_entry_t		rcr_desc_rd_head_pp;
186144961713Sgirish 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
186244961713Sgirish 	uint16_t		qlen, nrcr_read, npkt_read;
186344961713Sgirish 	uint32_t qlen_hw;
186444961713Sgirish 	boolean_t		multi;
186514ea4bb7Ssd 	rcrcfig_b_t rcr_cfg_b;
1866a3c5bd6dSspeer #if defined(_BIG_ENDIAN)
186744961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
186844961713Sgirish #endif
186944961713Sgirish 
187044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d "
187144961713Sgirish 		"channel %d", vindex, ldvp->channel));
187244961713Sgirish 
187344961713Sgirish 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
187444961713Sgirish 		return (NULL);
187544961713Sgirish 	}
187644961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
187744961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
187844961713Sgirish 	rcr_p = rx_rcr_rings->rcr_rings[vindex];
187944961713Sgirish 	channel = rcr_p->rdc;
188044961713Sgirish 	if (channel != ldvp->channel) {
188144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
188244961713Sgirish 			"channel %d, and rcr channel %d not matched.",
188344961713Sgirish 			vindex, ldvp->channel, channel));
188444961713Sgirish 		return (NULL);
188544961713Sgirish 	}
188644961713Sgirish 
188744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
188844961713Sgirish 		"==> nxge_rx_pkts: START: rcr channel %d "
188944961713Sgirish 		"head_p $%p head_pp $%p  index %d ",
189044961713Sgirish 		channel, rcr_p->rcr_desc_rd_head_p,
189144961713Sgirish 		rcr_p->rcr_desc_rd_head_pp,
189244961713Sgirish 		rcr_p->comp_rd_index));
189344961713Sgirish 
189444961713Sgirish 
1895a3c5bd6dSspeer #if !defined(_BIG_ENDIAN)
189644961713Sgirish 	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
189744961713Sgirish #else
189844961713Sgirish 	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
189944961713Sgirish 	if (rs != NPI_SUCCESS) {
190044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
190144961713Sgirish 		"channel %d, get qlen failed 0x%08x",
190244961713Sgirish 		vindex, ldvp->channel, rs));
190344961713Sgirish 		return (NULL);
190444961713Sgirish 	}
190544961713Sgirish #endif
190644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
190744961713Sgirish 		"qlen %d", channel, qlen));
190844961713Sgirish 
190944961713Sgirish 
191044961713Sgirish 
191144961713Sgirish 	if (!qlen) {
191244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
191344961713Sgirish 			"==> nxge_rx_pkts:rcr channel %d "
191444961713Sgirish 			"qlen %d (no pkts)", channel, qlen));
191544961713Sgirish 
191644961713Sgirish 		return (NULL);
191744961713Sgirish 	}
191844961713Sgirish 
191944961713Sgirish 	comp_rd_index = rcr_p->comp_rd_index;
192044961713Sgirish 
192144961713Sgirish 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
192244961713Sgirish 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
192344961713Sgirish 	nrcr_read = npkt_read = 0;
192444961713Sgirish 
192544961713Sgirish 	/*
192644961713Sgirish 	 * Number of packets queued.  (A jumbo or multi-buffer packet
192744961713Sgirish 	 * is counted as only one packet, even though it may take up
192844961713Sgirish 	 * more than one completion entry.)
192944961713Sgirish 	 */
193044961713Sgirish 	qlen_hw = (qlen < nxge_max_rx_pkts) ?
193144961713Sgirish 		qlen : nxge_max_rx_pkts;
193244961713Sgirish 	head_mp = NULL;
193344961713Sgirish 	tail_mp = &head_mp;
193444961713Sgirish 	nmp = mp_cont = NULL;
193544961713Sgirish 	multi = B_FALSE;
193644961713Sgirish 
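	/*
	 * Walk the completion ring, assembling received frames until the
	 * packet budget (qlen_hw) for this pass has been consumed.
	 */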
1937a3c5bd6dSspeer 	while (qlen_hw) {
193844961713Sgirish 
193944961713Sgirish #ifdef NXGE_DEBUG
194044961713Sgirish 		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
194144961713Sgirish #endif
194244961713Sgirish 		/*
194344961713Sgirish 		 * Process one completion ring entry.
194444961713Sgirish 		 */
194544961713Sgirish 		nxge_receive_packet(nxgep,
194644961713Sgirish 			rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
194744961713Sgirish 
194844961713Sgirish 		/*
194944961713Sgirish 		 * Message chaining modes: complete frames are linked
195044961713Sgirish 		 * through b_next; the buffers of a multi-buffer frame
195044961713Sgirish 		 * are linked through b_cont off the frame's head mblk.
195044961713Sgirish 		 */
195114ea4bb7Ssd 		if (nmp) {
195244961713Sgirish 			nmp->b_next = NULL;
195314ea4bb7Ssd 			if (!multi && !mp_cont) { /* frame fits a partition */
195414ea4bb7Ssd 				*tail_mp = nmp;
195514ea4bb7Ssd 				tail_mp = &nmp->b_next;
195614ea4bb7Ssd 				nmp = NULL;
195714ea4bb7Ssd 			} else if (multi && !mp_cont) { /* first segment */
195814ea4bb7Ssd 				*tail_mp = nmp;
195914ea4bb7Ssd 				tail_mp = &nmp->b_cont;
196014ea4bb7Ssd 			} else if (multi && mp_cont) {	/* mid of multi segs */
196114ea4bb7Ssd 				*tail_mp = mp_cont;
196214ea4bb7Ssd 				tail_mp = &mp_cont->b_cont;
196314ea4bb7Ssd 			} else if (!multi && mp_cont) { /* last segment */
1964a3c5bd6dSspeer 				*tail_mp = mp_cont;
196514ea4bb7Ssd 				tail_mp = &nmp->b_next;
196614ea4bb7Ssd 				nmp = NULL;
196714ea4bb7Ssd 			}
196844961713Sgirish 		}
196944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
197044961713Sgirish 			"==> nxge_rx_pkts: loop: rcr channel %d "
197144961713Sgirish 			"before updating: multi %d "
197244961713Sgirish 			"nrcr_read %d "
197344961713Sgirish 			"npk read %d "
197444961713Sgirish 			"head_pp $%p  index %d ",
197544961713Sgirish 			channel,
197644961713Sgirish 			multi,
197744961713Sgirish 			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
197844961713Sgirish 			comp_rd_index));
197944961713Sgirish 
198044961713Sgirish 		if (!multi) {
198144961713Sgirish 			qlen_hw--;
198244961713Sgirish 			npkt_read++;
198344961713Sgirish 		}
198444961713Sgirish 
198544961713Sgirish 		/*
198644961713Sgirish 		 * Update the next read entry.
198744961713Sgirish 		 */
198844961713Sgirish 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
198944961713Sgirish 					rcr_p->comp_wrap_mask);
199044961713Sgirish 
199144961713Sgirish 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
199244961713Sgirish 				rcr_p->rcr_desc_first_p,
199344961713Sgirish 				rcr_p->rcr_desc_last_p);
199444961713Sgirish 
199544961713Sgirish 		nrcr_read++;
199644961713Sgirish 
199744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
199844961713Sgirish 			"<== nxge_rx_pkts: (SAM, process one packet) "
199944961713Sgirish 			"nrcr_read %d",
200044961713Sgirish 			nrcr_read));
200144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
200244961713Sgirish 			"==> nxge_rx_pkts: loop: rcr channel %d "
200344961713Sgirish 			"multi %d "
200444961713Sgirish 			"nrcr_read %d "
200544961713Sgirish 			"npk read %d "
200644961713Sgirish 			"head_pp $%p  index %d ",
200744961713Sgirish 			channel,
200844961713Sgirish 			multi,
200944961713Sgirish 			nrcr_read, npkt_read, rcr_desc_rd_head_pp,
201044961713Sgirish 			comp_rd_index));
201144961713Sgirish 
201244961713Sgirish 	}
201344961713Sgirish 
201444961713Sgirish 	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
201544961713Sgirish 	rcr_p->comp_rd_index = comp_rd_index;
201644961713Sgirish 	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
201744961713Sgirish 
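	/*
	 * If the interrupt timeout or threshold tunables have changed,
	 * propagate the new values to this ring and to the channel's
	 * RCR configuration B register.
	 */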
201814ea4bb7Ssd 	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
201914ea4bb7Ssd 		(nxgep->intr_threshold != rcr_p->intr_threshold)) {
202014ea4bb7Ssd 		rcr_p->intr_timeout = nxgep->intr_timeout;
202114ea4bb7Ssd 		rcr_p->intr_threshold = nxgep->intr_threshold;
202214ea4bb7Ssd 		rcr_cfg_b.value = 0x0ULL;
202314ea4bb7Ssd 		if (rcr_p->intr_timeout)
202414ea4bb7Ssd 			rcr_cfg_b.bits.ldw.entout = 1;
202514ea4bb7Ssd 		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
202614ea4bb7Ssd 		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
202714ea4bb7Ssd 		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
202814ea4bb7Ssd 				    channel, rcr_cfg_b.value);
202914ea4bb7Ssd 	}
203044961713Sgirish 
203144961713Sgirish 	cs.bits.ldw.pktread = npkt_read;
203244961713Sgirish 	cs.bits.ldw.ptrread = nrcr_read;
203344961713Sgirish 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
203444961713Sgirish 			    channel, cs.value);
203544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
203644961713Sgirish 		"==> nxge_rx_pkts: EXIT: rcr channel %d "
203744961713Sgirish 		"head_pp $%p  index %016llx ",
203844961713Sgirish 		channel,
203944961713Sgirish 		rcr_p->rcr_desc_rd_head_pp,
204044961713Sgirish 		rcr_p->comp_rd_index));
204144961713Sgirish 	/*
204244961713Sgirish 	 * Update RCR buffer pointer read and number of packets
204344961713Sgirish 	 * read.
204444961713Sgirish 	 */
204544961713Sgirish 
204644961713Sgirish 	*rcrp = rcr_p;
204744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts"));
204844961713Sgirish 	return (head_mp);
204944961713Sgirish }
205044961713Sgirish 
205144961713Sgirish void
205244961713Sgirish nxge_receive_packet(p_nxge_t nxgep,
205344961713Sgirish     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
205444961713Sgirish     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
205544961713Sgirish {
205644961713Sgirish 	p_mblk_t		nmp = NULL;
205744961713Sgirish 	uint64_t		multi;
205844961713Sgirish 	uint64_t		dcf_err;
205944961713Sgirish 	uint8_t			channel;
206044961713Sgirish 
206144961713Sgirish 	boolean_t		first_entry = B_TRUE;
206244961713Sgirish 	boolean_t		is_tcp_udp = B_FALSE;
206344961713Sgirish 	boolean_t		buffer_free = B_FALSE;
206444961713Sgirish 	boolean_t		error_send_up = B_FALSE;
206544961713Sgirish 	uint8_t			error_type;
206644961713Sgirish 	uint16_t		l2_len;
206744961713Sgirish 	uint16_t		skip_len;
206844961713Sgirish 	uint8_t			pktbufsz_type;
206914ea4bb7Ssd 	uint16_t		pktbufsz;
207044961713Sgirish 	uint64_t		rcr_entry;
207144961713Sgirish 	uint64_t		*pkt_buf_addr_pp;
207244961713Sgirish 	uint64_t		*pkt_buf_addr_p;
207344961713Sgirish 	uint32_t		buf_offset;
207444961713Sgirish 	uint32_t		bsize;
207544961713Sgirish 	uint32_t		error_disp_cnt;
207644961713Sgirish 	uint32_t		msg_index;
207744961713Sgirish 	p_rx_rbr_ring_t		rx_rbr_p;
207844961713Sgirish 	p_rx_msg_t 		*rx_msg_ring_p;
207944961713Sgirish 	p_rx_msg_t		rx_msg_p;
208044961713Sgirish 	uint16_t		sw_offset_bytes = 0, hdr_size = 0;
208144961713Sgirish 	nxge_status_t		status = NXGE_OK;
208244961713Sgirish 	boolean_t		is_valid = B_FALSE;
208344961713Sgirish 	p_nxge_rx_ring_stats_t	rdc_stats;
2084a3c5bd6dSspeer 	uint32_t		bytes_read;
2085a3c5bd6dSspeer 	uint64_t		pkt_type;
2086a3c5bd6dSspeer 	uint64_t		frag;
208744961713Sgirish #ifdef	NXGE_DEBUG
208844961713Sgirish 	int			dump_len;
208944961713Sgirish #endif
209044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
209144961713Sgirish 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
209244961713Sgirish 
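	/*
	 * Decode the 64-bit completion entry: multi/fragment flags, DCF
	 * error, packet type, error type, L2 length, packet buffer size
	 * code, and the (shifted) packet buffer DMA address.
	 */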
209344961713Sgirish 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
209444961713Sgirish 
209544961713Sgirish 	multi = (rcr_entry & RCR_MULTI_MASK);
209644961713Sgirish 	dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
209744961713Sgirish 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
209844961713Sgirish 
209944961713Sgirish 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
210044961713Sgirish 	frag = (rcr_entry & RCR_FRAG_MASK);
210144961713Sgirish 
210244961713Sgirish 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
210344961713Sgirish 
210444961713Sgirish 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
210544961713Sgirish 				RCR_PKTBUFSZ_SHIFT);
210644961713Sgirish 
210744961713Sgirish 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
210844961713Sgirish 			RCR_PKT_BUF_ADDR_SHIFT);
210944961713Sgirish 
211044961713Sgirish 	channel = rcr_p->rdc;
211144961713Sgirish 
211244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
211344961713Sgirish 		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
211414ea4bb7Ssd 		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
211544961713Sgirish 		"error_type 0x%x pkt_type 0x%x  "
211644961713Sgirish 		"pktbufsz_type %d ",
211744961713Sgirish 		rcr_desc_rd_head_p,
211844961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len,
211944961713Sgirish 		multi,
212044961713Sgirish 		error_type,
212144961713Sgirish 		pkt_type,
212244961713Sgirish 		pktbufsz_type));
212344961713Sgirish 
212444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
212544961713Sgirish 		"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
212614ea4bb7Ssd 		"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
212744961713Sgirish 		"error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
212844961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len,
212944961713Sgirish 		multi,
213044961713Sgirish 		error_type,
213144961713Sgirish 		pkt_type));
213244961713Sgirish 
213344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
213444961713Sgirish 		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
213544961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
213644961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
213744961713Sgirish 
213844961713Sgirish 	/* get the stats ptr */
213944961713Sgirish 	rdc_stats = rcr_p->rdc_stats;
214044961713Sgirish 
214144961713Sgirish 	if (!l2_len) {
214244961713Sgirish 
214344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
214444961713Sgirish 			"<== nxge_receive_packet: failed: l2 length is 0."));
214544961713Sgirish 		return;
214644961713Sgirish 	}
214744961713Sgirish 
214856d930aeSspeer 	/* Hardware sends us 4 bytes of CRC as no stripping is done.  */
214956d930aeSspeer 	l2_len -= ETHERFCSL;
215056d930aeSspeer 
215144961713Sgirish 	/* shift 6 bits to get the full io address */
215244961713Sgirish 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
215344961713Sgirish 				RCR_PKT_BUF_ADDR_SHIFT_FULL);
215444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
215544961713Sgirish 		"==> (rbr) nxge_receive_packet: entry 0x%0llx "
215644961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
215744961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
215844961713Sgirish 
215944961713Sgirish 	rx_rbr_p = rcr_p->rx_rbr_p;
216044961713Sgirish 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
216144961713Sgirish 
216244961713Sgirish 	if (first_entry) {
216344961713Sgirish 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
216444961713Sgirish 			RXDMA_HDR_SIZE_DEFAULT);
216544961713Sgirish 
216644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
216744961713Sgirish 			"==> nxge_receive_packet: first entry 0x%016llx "
216844961713Sgirish 			"pkt_buf_addr_pp $%p l2_len %d hdr %d",
216944961713Sgirish 			rcr_entry, pkt_buf_addr_pp, l2_len,
217044961713Sgirish 			hdr_size));
217144961713Sgirish 	}
217244961713Sgirish 
217344961713Sgirish 	MUTEX_ENTER(&rcr_p->lock);
217444961713Sgirish 	MUTEX_ENTER(&rx_rbr_p->lock);
217544961713Sgirish 
2176a3c5bd6dSspeer 	bytes_read = rcr_p->rcvd_pkt_bytes;
2177a3c5bd6dSspeer 
217844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
217944961713Sgirish 		"==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
218044961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
218144961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
218244961713Sgirish 
218344961713Sgirish 	/*
218444961713Sgirish 	 * Packet buffer address in the completion entry points
218544961713Sgirish 	 * to the starting buffer address (offset 0).
218644961713Sgirish 	 * Use the starting buffer address to locate the corresponding
218744961713Sgirish 	 * kernel address.
218844961713Sgirish 	 */
218944961713Sgirish 	status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
219044961713Sgirish 			pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
219144961713Sgirish 			&buf_offset,
219244961713Sgirish 			&msg_index);
219344961713Sgirish 
219444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
219544961713Sgirish 		"==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
219644961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
219744961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
219844961713Sgirish 
219944961713Sgirish 	if (status != NXGE_OK) {
220044961713Sgirish 		MUTEX_EXIT(&rx_rbr_p->lock);
220144961713Sgirish 		MUTEX_EXIT(&rcr_p->lock);
220244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
220344961713Sgirish 			"<== nxge_receive_packet: found vaddr failed %d",
220444961713Sgirish 				status));
220544961713Sgirish 		return;
220644961713Sgirish 	}
220744961713Sgirish 
220844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
220944961713Sgirish 		"==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
221044961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
221144961713Sgirish 		rcr_entry, pkt_buf_addr_pp, l2_len));
221244961713Sgirish 
221344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
221444961713Sgirish 		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
221544961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
221644961713Sgirish 		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
221744961713Sgirish 
221844961713Sgirish 	rx_msg_p = rx_msg_ring_p[msg_index];
221944961713Sgirish 
222044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
222144961713Sgirish 		"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
222244961713Sgirish 		"full pkt_buf_addr_pp $%p l2_len %d",
222344961713Sgirish 		msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
222444961713Sgirish 
222544961713Sgirish 	switch (pktbufsz_type) {
222644961713Sgirish 	case RCR_PKTBUFSZ_0:
222744961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
222844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
222944961713Sgirish 			"==> nxge_receive_packet: 0 buf %d", bsize));
223044961713Sgirish 		break;
223144961713Sgirish 	case RCR_PKTBUFSZ_1:
223244961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
223344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
223444961713Sgirish 			"==> nxge_receive_packet: 1 buf %d", bsize));
223544961713Sgirish 		break;
223644961713Sgirish 	case RCR_PKTBUFSZ_2:
223744961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
223844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
223944961713Sgirish 			"==> nxge_receive_packet: 2 buf %d", bsize));
224044961713Sgirish 		break;
224144961713Sgirish 	case RCR_SINGLE_BLOCK:
224244961713Sgirish 		bsize = rx_msg_p->block_size;
224344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
224444961713Sgirish 			"==> nxge_receive_packet: single %d", bsize));
224544961713Sgirish 
224644961713Sgirish 		break;
224744961713Sgirish 	default:
224844961713Sgirish 		MUTEX_EXIT(&rx_rbr_p->lock);
224944961713Sgirish 		MUTEX_EXIT(&rcr_p->lock);
225044961713Sgirish 		return;
225144961713Sgirish 	}
225244961713Sgirish 
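	/* Sync the received header and payload bytes for CPU access. */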
225344961713Sgirish 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
225444961713Sgirish 		(buf_offset + sw_offset_bytes),
225544961713Sgirish 		(hdr_size + l2_len),
225644961713Sgirish 		DDI_DMA_SYNC_FORCPU);
225744961713Sgirish 
225844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
225944961713Sgirish 		"==> nxge_receive_packet: after first dump:usage count"));
226044961713Sgirish 
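	/*
	 * First use of this buffer block: decide whether frames from it
	 * should be bcopy'd into fresh mblks (once RBR consumption crosses
	 * the tunable thresholds) or loaned up in place.
	 */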
226144961713Sgirish 	if (rx_msg_p->cur_usage_cnt == 0) {
226244961713Sgirish 		if (rx_rbr_p->rbr_use_bcopy) {
226344961713Sgirish 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
226444961713Sgirish 			if (rx_rbr_p->rbr_consumed <
226544961713Sgirish 					rx_rbr_p->rbr_threshold_hi) {
226644961713Sgirish 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
226744961713Sgirish 					((rx_rbr_p->rbr_consumed >=
226844961713Sgirish 						rx_rbr_p->rbr_threshold_lo) &&
226944961713Sgirish 						(rx_rbr_p->rbr_bufsize_type >=
227044961713Sgirish 							pktbufsz_type))) {
227144961713Sgirish 					rx_msg_p->rx_use_bcopy = B_TRUE;
227244961713Sgirish 				}
227344961713Sgirish 			} else {
227444961713Sgirish 				rx_msg_p->rx_use_bcopy = B_TRUE;
227544961713Sgirish 			}
227644961713Sgirish 		}
227744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
227844961713Sgirish 			"==> nxge_receive_packet: buf %d (new block) ",
227944961713Sgirish 			bsize));
228044961713Sgirish 
228144961713Sgirish 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
228244961713Sgirish 		rx_msg_p->pkt_buf_size = bsize;
228344961713Sgirish 		rx_msg_p->cur_usage_cnt = 1;
228444961713Sgirish 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
228544961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
228644961713Sgirish 				"==> nxge_receive_packet: buf %d "
228744961713Sgirish 				"(single block) ",
228844961713Sgirish 				bsize));
228944961713Sgirish 			/*
229044961713Sgirish 			 * Buffer can be reused once the free function
229144961713Sgirish 			 * is called.
229244961713Sgirish 			 */
229344961713Sgirish 			rx_msg_p->max_usage_cnt = 1;
229444961713Sgirish 			buffer_free = B_TRUE;
229544961713Sgirish 		} else {
229644961713Sgirish 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
229744961713Sgirish 			if (rx_msg_p->max_usage_cnt == 1) {
229844961713Sgirish 				buffer_free = B_TRUE;
229944961713Sgirish 			}
230044961713Sgirish 		}
230144961713Sgirish 	} else {
230244961713Sgirish 		rx_msg_p->cur_usage_cnt++;
230344961713Sgirish 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
230444961713Sgirish 			buffer_free = B_TRUE;
230544961713Sgirish 		}
230644961713Sgirish 	}
230744961713Sgirish 
230844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
230944961713Sgirish 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
231044961713Sgirish 		msg_index, l2_len,
231144961713Sgirish 		rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
231244961713Sgirish 
231344961713Sgirish 	if ((error_type) || (dcf_err)) {
231444961713Sgirish 		rdc_stats->ierrors++;
231544961713Sgirish 		if (dcf_err) {
231644961713Sgirish 			rdc_stats->dcf_err++;
231744961713Sgirish #ifdef	NXGE_DEBUG
231844961713Sgirish 			if (!rdc_stats->dcf_err) {
231944961713Sgirish 				NXGE_DEBUG_MSG((nxgep, RX_CTL,
232044961713Sgirish 				"nxge_receive_packet: channel %d dcf_err rcr"
232144961713Sgirish 				" 0x%llx", channel, rcr_entry));
232244961713Sgirish 			}
232344961713Sgirish #endif
232444961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
232544961713Sgirish 					NXGE_FM_EREPORT_RDMC_DCF_ERR);
232644961713Sgirish 		} else {
232744961713Sgirish 				/* Update error stats */
232844961713Sgirish 			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
232944961713Sgirish 			rdc_stats->errlog.compl_err_type = error_type;
233044961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
233144961713Sgirish 				    NXGE_FM_EREPORT_RDMC_COMPLETION_ERR);
233244961713Sgirish 
233344961713Sgirish 			switch (error_type) {
233444961713Sgirish 				case RCR_L2_ERROR:
233544961713Sgirish 					rdc_stats->l2_err++;
233644961713Sgirish 					if (rdc_stats->l2_err <
233744961713Sgirish 						error_disp_cnt)
233844961713Sgirish 						NXGE_ERROR_MSG((nxgep,
233944961713Sgirish 						NXGE_ERR_CTL,
234044961713Sgirish 						" nxge_receive_packet:"
234144961713Sgirish 						" channel %d RCR L2_ERROR",
234244961713Sgirish 						channel));
234344961713Sgirish 					break;
234444961713Sgirish 				case RCR_L4_CSUM_ERROR:
234544961713Sgirish 					error_send_up = B_TRUE;
234644961713Sgirish 					rdc_stats->l4_cksum_err++;
234744961713Sgirish 					if (rdc_stats->l4_cksum_err <
234844961713Sgirish 						error_disp_cnt)
234944961713Sgirish 						NXGE_ERROR_MSG((nxgep,
235044961713Sgirish 						NXGE_ERR_CTL,
235144961713Sgirish 							" nxge_receive_packet:"
235244961713Sgirish 							" channel %d"
235344961713Sgirish 							" RCR L4_CSUM_ERROR",
235444961713Sgirish 							channel));
235544961713Sgirish 					break;
235644961713Sgirish 				case RCR_FFLP_SOFT_ERROR:
235744961713Sgirish 					error_send_up = B_TRUE;
235844961713Sgirish 					rdc_stats->fflp_soft_err++;
235944961713Sgirish 					if (rdc_stats->fflp_soft_err <
236044961713Sgirish 						error_disp_cnt)
236144961713Sgirish 						NXGE_ERROR_MSG((nxgep,
236244961713Sgirish 							NXGE_ERR_CTL,
236344961713Sgirish 							" nxge_receive_packet:"
236444961713Sgirish 							" channel %d"
236544961713Sgirish 							" RCR FFLP_SOFT_ERROR",
236644961713Sgirish 							channel));
236744961713Sgirish 					break;
236844961713Sgirish 				case RCR_ZCP_SOFT_ERROR:
236944961713Sgirish 					error_send_up = B_TRUE;
237044961713Sgirish 					rdc_stats->zcp_soft_err++;
237144961713Sgirish 					if (rdc_stats->zcp_soft_err <
237244961713Sgirish 						error_disp_cnt)
237344961713Sgirish 						NXGE_ERROR_MSG((nxgep,
237444961713Sgirish 							NXGE_ERR_CTL,
237544961713Sgirish 							" nxge_receive_packet:"
237644961713Sgirish 							" Channel %d"
237744961713Sgirish 							" RCR ZCP_SOFT_ERROR",
237844961713Sgirish 							channel));
237944961713Sgirish 					break;
238044961713Sgirish 				default:
238144961713Sgirish 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
238244961713Sgirish 							" nxge_receive_packet:"
238344961713Sgirish 							" Channel %d"
238444961713Sgirish 							" RCR entry 0x%llx"
238544961713Sgirish 							" error 0x%x",
238644961713Sgirish 							channel, rcr_entry,
238744961713Sgirish 							error_type));
238844961713Sgirish 					break;
238944961713Sgirish 			}
239044961713Sgirish 		}
239144961713Sgirish 
239244961713Sgirish 		/*
239344961713Sgirish 		 * Update and repost buffer block if max usage
239444961713Sgirish 		 * count is reached.
239544961713Sgirish 		 */
239644961713Sgirish 		if (error_send_up == B_FALSE) {
2397958cea9eSml 			atomic_inc_32(&rx_msg_p->ref_cnt);
2398958cea9eSml 			atomic_inc_32(&nxge_mblks_pending);
239944961713Sgirish 			if (buffer_free == B_TRUE) {
240044961713Sgirish 				rx_msg_p->free = B_TRUE;
240144961713Sgirish 			}
240244961713Sgirish 
240344961713Sgirish 			MUTEX_EXIT(&rx_rbr_p->lock);
240444961713Sgirish 			MUTEX_EXIT(&rcr_p->lock);
240544961713Sgirish 			nxge_freeb(rx_msg_p);
240644961713Sgirish 			return;
240744961713Sgirish 		}
240844961713Sgirish 	}
240944961713Sgirish 
241044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
241144961713Sgirish 		"==> nxge_receive_packet: DMA sync second "));
241244961713Sgirish 
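	/* The payload starts after the SW offset bytes and the packet header. */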
241344961713Sgirish 	skip_len = sw_offset_bytes + hdr_size;
241444961713Sgirish 	if (!rx_msg_p->rx_use_bcopy) {
2415958cea9eSml 		/*
2416958cea9eSml 		 * For loaned-up buffers, the reference count is bumped
2417958cea9eSml 		 * first (by nxge_dupb); the free state is set afterwards.
2418958cea9eSml 		 */
241944961713Sgirish 		nmp = nxge_dupb(rx_msg_p, buf_offset, bsize);
242044961713Sgirish 	} else {
242144961713Sgirish 		nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, l2_len);
242244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
242344961713Sgirish 			"==> nxge_receive_packet: use bcopy "
242444961713Sgirish 			"rbr consumed %d "
242544961713Sgirish 			"pktbufsz_type %d "
242644961713Sgirish 			"offset %d "
242744961713Sgirish 			"hdr_size %d l2_len %d "
242844961713Sgirish 			"nmp->b_rptr $%p",
242944961713Sgirish 			rx_rbr_p->rbr_consumed,
243044961713Sgirish 			pktbufsz_type,
243144961713Sgirish 			buf_offset, hdr_size, l2_len,
243244961713Sgirish 			nmp->b_rptr));
243344961713Sgirish 	}
243444961713Sgirish 	if (nmp != NULL) {
243514ea4bb7Ssd 		pktbufsz = nxge_get_pktbuf_size(nxgep, pktbufsz_type,
243614ea4bb7Ssd 			rx_rbr_p->rbr_cfgb);
243744961713Sgirish 		if (!rx_msg_p->rx_use_bcopy) {
243814ea4bb7Ssd 			if (first_entry) {
243914ea4bb7Ssd 				bytes_read = 0;
244014ea4bb7Ssd 				nmp->b_rptr = &nmp->b_rptr[skip_len];
244114ea4bb7Ssd 				if (l2_len > pktbufsz - skip_len)
244214ea4bb7Ssd 					nmp->b_wptr = &nmp->b_rptr[pktbufsz
244314ea4bb7Ssd 						- skip_len];
244414ea4bb7Ssd 				else
244514ea4bb7Ssd 					nmp->b_wptr = &nmp->b_rptr[l2_len];
244614ea4bb7Ssd 			} else {
244714ea4bb7Ssd 				if (l2_len - bytes_read > pktbufsz)
244814ea4bb7Ssd 					nmp->b_wptr = &nmp->b_rptr[pktbufsz];
244914ea4bb7Ssd 				else
245014ea4bb7Ssd 					nmp->b_wptr =
245114ea4bb7Ssd 					    &nmp->b_rptr[l2_len - bytes_read];
245214ea4bb7Ssd 			}
245314ea4bb7Ssd 			bytes_read += nmp->b_wptr - nmp->b_rptr;
245444961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
245544961713Sgirish 				"==> nxge_receive_packet after dupb: "
245644961713Sgirish 				"rbr consumed %d "
245744961713Sgirish 				"pktbufsz_type %d "
245844961713Sgirish 				"nmp $%p rptr $%p wptr $%p "
245944961713Sgirish 				"buf_offset %d bsize %d l2_len %d skip_len %d",
246044961713Sgirish 				rx_rbr_p->rbr_consumed,
246144961713Sgirish 				pktbufsz_type,
246244961713Sgirish 				nmp, nmp->b_rptr, nmp->b_wptr,
246344961713Sgirish 				buf_offset, bsize, l2_len, skip_len));
246444961713Sgirish 		}
246544961713Sgirish 	} else {
246644961713Sgirish 		cmn_err(CE_WARN, "!nxge_receive_packet: "
246744961713Sgirish 			"update stats (error)");
2468*2e59129aSraghus 		atomic_inc_32(&rx_msg_p->ref_cnt);
2469*2e59129aSraghus 		atomic_inc_32(&nxge_mblks_pending);
2470*2e59129aSraghus 		if (buffer_free == B_TRUE) {
2471*2e59129aSraghus 			rx_msg_p->free = B_TRUE;
2472*2e59129aSraghus 		}
2473*2e59129aSraghus 		MUTEX_EXIT(&rx_rbr_p->lock);
2474*2e59129aSraghus 		MUTEX_EXIT(&rcr_p->lock);
2475*2e59129aSraghus 		nxge_freeb(rx_msg_p);
2476*2e59129aSraghus 		return;
247744961713Sgirish 	}
247844961713Sgirish 	if (buffer_free == B_TRUE) {
247944961713Sgirish 		rx_msg_p->free = B_TRUE;
248044961713Sgirish 	}
248144961713Sgirish 
248244961713Sgirish 	/*
248344961713Sgirish 	 * ERROR, FRAG and PKT_TYPE are only reported
248444961713Sgirish 	 * in the first entry.
248544961713Sgirish 	 * If a packet is not fragmented and no error bit is set, then
248644961713Sgirish 	 * L4 checksum is OK.
248744961713Sgirish 	 */
248844961713Sgirish 	is_valid = (nmp != NULL);
248944961713Sgirish 	rdc_stats->ibytes += l2_len;
249044961713Sgirish 	rdc_stats->ipackets++;
249144961713Sgirish 	MUTEX_EXIT(&rx_rbr_p->lock);
249244961713Sgirish 	MUTEX_EXIT(&rcr_p->lock);
249344961713Sgirish 
249444961713Sgirish 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
249544961713Sgirish 		atomic_inc_32(&rx_msg_p->ref_cnt);
249656d930aeSspeer 		atomic_inc_32(&nxge_mblks_pending);
249744961713Sgirish 		nxge_freeb(rx_msg_p);
249844961713Sgirish 	}
249944961713Sgirish 
250044961713Sgirish 	if (is_valid) {
2501a3c5bd6dSspeer 		nmp->b_cont = NULL;
250244961713Sgirish 		if (first_entry) {
250344961713Sgirish 			*mp = nmp;
250444961713Sgirish 			*mp_cont = NULL;
2505a3c5bd6dSspeer 		} else
250644961713Sgirish 			*mp_cont = nmp;
250744961713Sgirish 	}
250844961713Sgirish 
250944961713Sgirish 	/*
251044961713Sgirish 	 * Update stats and hardware checksumming.
251144961713Sgirish 	 */
251244961713Sgirish 	if (is_valid && !multi) {
251344961713Sgirish 
251444961713Sgirish 		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
251544961713Sgirish 				pkt_type == RCR_PKT_IS_UDP) ?
251644961713Sgirish 					B_TRUE: B_FALSE);
251744961713Sgirish 
251844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
251914ea4bb7Ssd 			"is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
252044961713Sgirish 			is_valid, multi, is_tcp_udp, frag, error_type));
252144961713Sgirish 
252244961713Sgirish 		if (is_tcp_udp && !frag && !error_type) {
252344961713Sgirish 			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
252444961713Sgirish 				HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
252544961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
252644961713Sgirish 				"==> nxge_receive_packet: Full tcp/udp cksum "
252714ea4bb7Ssd 				"is_valid 0x%x multi 0x%llx pkt %d frag %d "
252844961713Sgirish 				"error %d",
252944961713Sgirish 				is_valid, multi, is_tcp_udp, frag, error_type));
253044961713Sgirish 		}
253144961713Sgirish 	}
253244961713Sgirish 
253344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
253444961713Sgirish 		"==> nxge_receive_packet: *mp 0x%016llx", *mp));
253544961713Sgirish 
253644961713Sgirish 	*multi_p = (multi == RCR_MULTI_MASK);
253744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
253844961713Sgirish 		"multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
253944961713Sgirish 		*multi_p, nmp, *mp, *mp_cont));
254044961713Sgirish }
254144961713Sgirish 
254244961713Sgirish /*ARGSUSED*/
254344961713Sgirish static nxge_status_t
254444961713Sgirish nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp,
254544961713Sgirish 						rx_dma_ctl_stat_t cs)
254644961713Sgirish {
254744961713Sgirish 	p_nxge_rx_ring_stats_t	rdc_stats;
254844961713Sgirish 	npi_handle_t		handle;
254944961713Sgirish 	npi_status_t		rs;
255044961713Sgirish 	boolean_t		rxchan_fatal = B_FALSE;
255144961713Sgirish 	boolean_t		rxport_fatal = B_FALSE;
255244961713Sgirish 	uint8_t			channel;
255344961713Sgirish 	uint8_t			portn;
255444961713Sgirish 	nxge_status_t		status = NXGE_OK;
255544961713Sgirish 	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
255644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
255744961713Sgirish 
255844961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
255944961713Sgirish 	channel = ldvp->channel;
256044961713Sgirish 	portn = nxgep->mac.portnum;
256144961713Sgirish 	rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index];
256244961713Sgirish 
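	/*
	 * Check each control/status error bit: bump the corresponding
	 * statistic, post an FMA ereport where one is defined, and note
	 * whether the error is fatal to this channel or to the whole port.
	 */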
256344961713Sgirish 	if (cs.bits.hdw.rbr_tmout) {
256444961713Sgirish 		rdc_stats->rx_rbr_tmout++;
256544961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
256644961713Sgirish 					NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
256744961713Sgirish 		rxchan_fatal = B_TRUE;
256844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
256944961713Sgirish 			"==> nxge_rx_err_evnts: rx_rbr_timeout"));
257044961713Sgirish 	}
257144961713Sgirish 	if (cs.bits.hdw.rsp_cnt_err) {
257244961713Sgirish 		rdc_stats->rsp_cnt_err++;
257344961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
257444961713Sgirish 					NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
257544961713Sgirish 		rxchan_fatal = B_TRUE;
257644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
257744961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
257844961713Sgirish 			"rsp_cnt_err", channel));
257944961713Sgirish 	}
258044961713Sgirish 	if (cs.bits.hdw.byte_en_bus) {
258144961713Sgirish 		rdc_stats->byte_en_bus++;
258244961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
258344961713Sgirish 					NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
258444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
258544961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
258644961713Sgirish 			"fatal error: byte_en_bus", channel));
258744961713Sgirish 		rxchan_fatal = B_TRUE;
258844961713Sgirish 	}
258944961713Sgirish 	if (cs.bits.hdw.rsp_dat_err) {
259044961713Sgirish 		rdc_stats->rsp_dat_err++;
259144961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
259244961713Sgirish 					NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
259344961713Sgirish 		rxchan_fatal = B_TRUE;
259444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
259544961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
259644961713Sgirish 			"fatal error: rsp_dat_err", channel));
259744961713Sgirish 	}
259844961713Sgirish 	if (cs.bits.hdw.rcr_ack_err) {
259944961713Sgirish 		rdc_stats->rcr_ack_err++;
260044961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
260144961713Sgirish 					NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
260244961713Sgirish 		rxchan_fatal = B_TRUE;
260344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
260444961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
260544961713Sgirish 			"fatal error: rcr_ack_err", channel));
260644961713Sgirish 	}
260744961713Sgirish 	if (cs.bits.hdw.dc_fifo_err) {
260844961713Sgirish 		rdc_stats->dc_fifo_err++;
260944961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
261044961713Sgirish 					NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
261144961713Sgirish 		/* Not fatal to the channel, but fatal to the port. */
261244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
261344961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
261444961713Sgirish 			"dc_fifo_err", channel));
261544961713Sgirish 		rxport_fatal = B_TRUE;
261644961713Sgirish 	}
261744961713Sgirish 	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
261844961713Sgirish 		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
261944961713Sgirish 				&rdc_stats->errlog.pre_par,
262044961713Sgirish 				&rdc_stats->errlog.sha_par))
262144961713Sgirish 				!= NPI_SUCCESS) {
262244961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
262344961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
262444961713Sgirish 				"rcr_sha_par: get perr", channel));
262544961713Sgirish 			return (NXGE_ERROR | rs);
262644961713Sgirish 		}
262744961713Sgirish 		if (cs.bits.hdw.rcr_sha_par) {
262844961713Sgirish 			rdc_stats->rcr_sha_par++;
262944961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
263044961713Sgirish 					NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
263144961713Sgirish 			rxchan_fatal = B_TRUE;
263244961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
263344961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
263444961713Sgirish 				"fatal error: rcr_sha_par", channel));
263544961713Sgirish 		}
263644961713Sgirish 		if (cs.bits.hdw.rbr_pre_par) {
263744961713Sgirish 			rdc_stats->rbr_pre_par++;
263844961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
263944961713Sgirish 					NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
264044961713Sgirish 			rxchan_fatal = B_TRUE;
264144961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
264244961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
264344961713Sgirish 				"fatal error: rbr_pre_par", channel));
264444961713Sgirish 		}
264544961713Sgirish 	}
264644961713Sgirish 	if (cs.bits.hdw.port_drop_pkt) {
264744961713Sgirish 		rdc_stats->port_drop_pkt++;
264844961713Sgirish 		if (rdc_stats->port_drop_pkt < error_disp_cnt)
264944961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
265044961713Sgirish 				"==> nxge_rx_err_evnts (channel %d): "
265144961713Sgirish 				"port_drop_pkt", channel));
265244961713Sgirish 	}
265344961713Sgirish 	if (cs.bits.hdw.wred_drop) {
265444961713Sgirish 		rdc_stats->wred_drop++;
265544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
265644961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
265744961713Sgirish 			"wred_drop", channel));
265844961713Sgirish 	}
265944961713Sgirish 	if (cs.bits.hdw.rbr_pre_empty) {
266044961713Sgirish 		rdc_stats->rbr_pre_empty++;
266144961713Sgirish 		if (rdc_stats->rbr_pre_empty < error_disp_cnt)
266244961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
266344961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
266444961713Sgirish 				"rbr_pre_empty", channel));
266544961713Sgirish 	}
266644961713Sgirish 	if (cs.bits.hdw.rcr_shadow_full) {
266744961713Sgirish 		rdc_stats->rcr_shadow_full++;
266844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
266944961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
267044961713Sgirish 			"rcr_shadow_full", channel));
267144961713Sgirish 	}
267244961713Sgirish 	if (cs.bits.hdw.config_err) {
267344961713Sgirish 		rdc_stats->config_err++;
267444961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
267544961713Sgirish 					NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
267644961713Sgirish 		rxchan_fatal = B_TRUE;
267744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
267844961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
267944961713Sgirish 			"config error", channel));
268044961713Sgirish 	}
268144961713Sgirish 	if (cs.bits.hdw.rcrincon) {
268244961713Sgirish 		rdc_stats->rcrincon++;
268344961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
268444961713Sgirish 					NXGE_FM_EREPORT_RDMC_RCRINCON);
268544961713Sgirish 		rxchan_fatal = B_TRUE;
268644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
268744961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
268844961713Sgirish 			"fatal error: rcrincon error", channel));
268944961713Sgirish 	}
269044961713Sgirish 	if (cs.bits.hdw.rcrfull) {
269144961713Sgirish 		rdc_stats->rcrfull++;
269244961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
269344961713Sgirish 					NXGE_FM_EREPORT_RDMC_RCRFULL);
269444961713Sgirish 		rxchan_fatal = B_TRUE;
269544961713Sgirish 		if (rdc_stats->rcrfull < error_disp_cnt)
269644961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
269744961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
269844961713Sgirish 				"fatal error: rcrfull error", channel));
269944961713Sgirish 	}
270044961713Sgirish 	if (cs.bits.hdw.rbr_empty) {
270144961713Sgirish 		rdc_stats->rbr_empty++;
270244961713Sgirish 		if (rdc_stats->rbr_empty < error_disp_cnt)
270344961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
270444961713Sgirish 				"==> nxge_rx_err_evnts(channel %d): "
270544961713Sgirish 				"rbr empty error", channel));
270644961713Sgirish 	}
270744961713Sgirish 	if (cs.bits.hdw.rbrfull) {
270844961713Sgirish 		rdc_stats->rbrfull++;
270944961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
271044961713Sgirish 					NXGE_FM_EREPORT_RDMC_RBRFULL);
271144961713Sgirish 		rxchan_fatal = B_TRUE;
271244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
271344961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
271444961713Sgirish 			"fatal error: rbr_full error", channel));
271544961713Sgirish 	}
271644961713Sgirish 	if (cs.bits.hdw.rbrlogpage) {
271744961713Sgirish 		rdc_stats->rbrlogpage++;
271844961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
271944961713Sgirish 					NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
272044961713Sgirish 		rxchan_fatal = B_TRUE;
272144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
272244961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
272344961713Sgirish 			"fatal error: rbr logical page error", channel));
272444961713Sgirish 	}
272544961713Sgirish 	if (cs.bits.hdw.cfiglogpage) {
272644961713Sgirish 		rdc_stats->cfiglogpage++;
272744961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
272844961713Sgirish 					NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
272944961713Sgirish 		rxchan_fatal = B_TRUE;
273044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
273144961713Sgirish 			"==> nxge_rx_err_evnts(channel %d): "
273244961713Sgirish 			"fatal error: cfig logical page error", channel));
273344961713Sgirish 	}
273444961713Sgirish 
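	/*
	 * Attempt recovery: a port-fatal error goes through the IPP
	 * recovery path, a channel-fatal error resets only the offending
	 * RXDMA channel.  FMA is told the service is restored only when
	 * recovery succeeds.
	 */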
273544961713Sgirish 	if (rxport_fatal)  {
273644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
273744961713Sgirish 				" nxge_rx_err_evnts: "
273844961713Sgirish 				" fatal error on Port #%d\n",
273944961713Sgirish 				portn));
274044961713Sgirish 		status = nxge_ipp_fatal_err_recover(nxgep);
274144961713Sgirish 		if (status == NXGE_OK) {
274244961713Sgirish 			FM_SERVICE_RESTORED(nxgep);
274344961713Sgirish 		}
274444961713Sgirish 	}
274544961713Sgirish 
274644961713Sgirish 	if (rxchan_fatal) {
274744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
274844961713Sgirish 				" nxge_rx_err_evnts: "
274944961713Sgirish 				" fatal error on Channel #%d\n",
275044961713Sgirish 				channel));
275144961713Sgirish 		status = nxge_rxdma_fatal_err_recover(nxgep, channel);
275244961713Sgirish 		if (status == NXGE_OK) {
275344961713Sgirish 			FM_SERVICE_RESTORED(nxgep);
275444961713Sgirish 		}
275544961713Sgirish 	}
275644961713Sgirish 
275744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
275844961713Sgirish 
275944961713Sgirish 	return (status);
276044961713Sgirish }
276144961713Sgirish 
276244961713Sgirish static nxge_status_t
276344961713Sgirish nxge_map_rxdma(p_nxge_t nxgep)
276444961713Sgirish {
276544961713Sgirish 	int			i, ndmas;
276644961713Sgirish 	uint16_t		channel;
276744961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
276844961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
276944961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
277044961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
277144961713Sgirish 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
277244961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
277344961713Sgirish 	p_nxge_dma_pool_t	dma_buf_poolp;
277444961713Sgirish 	p_nxge_dma_pool_t	dma_cntl_poolp;
277544961713Sgirish 	p_nxge_dma_common_t	*dma_buf_p;
277644961713Sgirish 	p_nxge_dma_common_t	*dma_cntl_p;
277744961713Sgirish 	uint32_t		*num_chunks;
277844961713Sgirish 	nxge_status_t		status = NXGE_OK;
277944961713Sgirish #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
278044961713Sgirish 	p_nxge_dma_common_t	t_dma_buf_p;
278144961713Sgirish 	p_nxge_dma_common_t	t_dma_cntl_p;
278244961713Sgirish #endif
278344961713Sgirish 
278444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
278544961713Sgirish 
278644961713Sgirish 	dma_buf_poolp = nxgep->rx_buf_pool_p;
278744961713Sgirish 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
278844961713Sgirish 
278944961713Sgirish 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
279044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
279144961713Sgirish 			"<== nxge_map_rxdma: buf not allocated"));
279244961713Sgirish 		return (NXGE_ERROR);
279344961713Sgirish 	}
279444961713Sgirish 
279544961713Sgirish 	ndmas = dma_buf_poolp->ndmas;
279644961713Sgirish 	if (!ndmas) {
279744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
279844961713Sgirish 			"<== nxge_map_rxdma: no dma allocated"));
279944961713Sgirish 		return (NXGE_ERROR);
280044961713Sgirish 	}
280144961713Sgirish 
280244961713Sgirish 	num_chunks = dma_buf_poolp->num_chunks;
280344961713Sgirish 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
280444961713Sgirish 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
280544961713Sgirish 
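	/*
	 * Allocate the per-port ring containers and the per-channel
	 * pointer arrays (ndmas entries each) for the RBRs, RCRs and
	 * mailbox areas mapped below.
	 */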
280614ea4bb7Ssd 	rx_rbr_rings = (p_rx_rbr_rings_t)
280714ea4bb7Ssd 		KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
280814ea4bb7Ssd 	rbr_rings = (p_rx_rbr_ring_t *)
280914ea4bb7Ssd 		KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
281044961713Sgirish 	rx_rcr_rings = (p_rx_rcr_rings_t)
281114ea4bb7Ssd 		KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
281214ea4bb7Ssd 	rcr_rings = (p_rx_rcr_ring_t *)
281314ea4bb7Ssd 		KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
281444961713Sgirish 	rx_mbox_areas_p = (p_rx_mbox_areas_t)
281514ea4bb7Ssd 		KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
281614ea4bb7Ssd 	rx_mbox_p = (p_rx_mbox_t *)
281714ea4bb7Ssd 		KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
281814ea4bb7Ssd 
281914ea4bb7Ssd 	/*
282014ea4bb7Ssd 	 * The timeout should be set based on the system clock divider.
282114ea4bb7Ssd 	 * The default timeout value of 1 assumes a granularity of 1000
282214ea4bb7Ssd 	 * clock cycles, roughly 3 microseconds at 300 MHz.
282314ea4bb7Ssd 	 */
282414ea4bb7Ssd 
282514ea4bb7Ssd 	nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
282614ea4bb7Ssd 	nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
282744961713Sgirish 
282844961713Sgirish 	/*
282944961713Sgirish 	 * Map descriptors from the buffer pools for each DMA channel.
283044961713Sgirish 	 */
283144961713Sgirish 	for (i = 0; i < ndmas; i++) {
283244961713Sgirish 		/*
283344961713Sgirish 		 * Set up and prepare buffer blocks, descriptors
283444961713Sgirish 		 * and mailbox.
283544961713Sgirish 		 */
283644961713Sgirish 		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
283744961713Sgirish 		status = nxge_map_rxdma_channel(nxgep, channel,
283844961713Sgirish 				(p_nxge_dma_common_t *)&dma_buf_p[i],
283944961713Sgirish 				(p_rx_rbr_ring_t *)&rbr_rings[i],
284044961713Sgirish 				num_chunks[i],
284144961713Sgirish 				(p_nxge_dma_common_t *)&dma_cntl_p[i],
284244961713Sgirish 				(p_rx_rcr_ring_t *)&rcr_rings[i],
284344961713Sgirish 				(p_rx_mbox_t *)&rx_mbox_p[i]);
284444961713Sgirish 		if (status != NXGE_OK) {
284544961713Sgirish 			goto nxge_map_rxdma_fail1;
284644961713Sgirish 		}
284744961713Sgirish 		rbr_rings[i]->index = (uint16_t)i;
284844961713Sgirish 		rcr_rings[i]->index = (uint16_t)i;
284944961713Sgirish 		rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i];
285044961713Sgirish 
285144961713Sgirish #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
285244961713Sgirish 		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
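			/*
			 * N2 NIU: record the data buffer and control space
			 * addresses and sizes used by the logical-page
			 * (NIU_LP_WORKAROUND) setup done later.
			 */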
285344961713Sgirish 			rbr_rings[i]->hv_set = B_FALSE;
285444961713Sgirish 			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
285544961713Sgirish 			t_dma_cntl_p =
285644961713Sgirish 				(p_nxge_dma_common_t)dma_cntl_p[i];
285744961713Sgirish 
285844961713Sgirish 			rbr_rings[i]->hv_rx_buf_base_ioaddr_pp =
285944961713Sgirish 				(uint64_t)t_dma_buf_p->orig_ioaddr_pp;
286044961713Sgirish 			rbr_rings[i]->hv_rx_buf_ioaddr_size =
286144961713Sgirish 				(uint64_t)t_dma_buf_p->orig_alength;
286244961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
286344961713Sgirish 				"==> nxge_map_rxdma_channel: "
286444961713Sgirish 				"channel %d "
286544961713Sgirish 				"data buf base io $%p ($%p) "
286644961713Sgirish 				"size 0x%llx (%d 0x%x)",
286744961713Sgirish 				channel,
286844961713Sgirish 				rbr_rings[i]->hv_rx_buf_base_ioaddr_pp,
286944961713Sgirish 				t_dma_cntl_p->ioaddr_pp,
287044961713Sgirish 				rbr_rings[i]->hv_rx_buf_ioaddr_size,
287144961713Sgirish 				t_dma_buf_p->orig_alength,
287244961713Sgirish 				t_dma_buf_p->orig_alength));
287344961713Sgirish 
287444961713Sgirish 			rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp =
287544961713Sgirish 				(uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
287644961713Sgirish 			rbr_rings[i]->hv_rx_cntl_ioaddr_size =
287744961713Sgirish 				(uint64_t)t_dma_cntl_p->orig_alength;
287844961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
287944961713Sgirish 				"==> nxge_map_rxdma_channel: "
288044961713Sgirish 				"channel %d "
288144961713Sgirish 				"cntl base io $%p ($%p) "
288244961713Sgirish 				"size 0x%llx (%d 0x%x)",
288344961713Sgirish 				channel,
288444961713Sgirish 				rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp,
288544961713Sgirish 				t_dma_cntl_p->ioaddr_pp,
288644961713Sgirish 				rbr_rings[i]->hv_rx_cntl_ioaddr_size,
288744961713Sgirish 				t_dma_cntl_p->orig_alength,
288844961713Sgirish 				t_dma_cntl_p->orig_alength));
288944961713Sgirish 		}
289044961713Sgirish 
289144961713Sgirish #endif	/* sun4v and NIU_LP_WORKAROUND */
289244961713Sgirish 	}
289344961713Sgirish 
289444961713Sgirish 	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
289544961713Sgirish 	rx_rbr_rings->rbr_rings = rbr_rings;
289644961713Sgirish 	nxgep->rx_rbr_rings = rx_rbr_rings;
289744961713Sgirish 	rx_rcr_rings->rcr_rings = rcr_rings;
289844961713Sgirish 	nxgep->rx_rcr_rings = rx_rcr_rings;
289944961713Sgirish 
290044961713Sgirish 	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
290144961713Sgirish 	nxgep->rx_mbox_areas_p = rx_mbox_areas_p;
290244961713Sgirish 
290344961713Sgirish 	goto nxge_map_rxdma_exit;
290444961713Sgirish 
290544961713Sgirish nxge_map_rxdma_fail1:
290644961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
290744961713Sgirish 		"==> nxge_map_rxdma: unmap rbr,rcr "
290844961713Sgirish 		"(status 0x%x channel %d i %d)",
290944961713Sgirish 		status, channel, i));
291056d930aeSspeer 	i--;
291144961713Sgirish 	for (; i >= 0; i--) {
291244961713Sgirish 		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
291344961713Sgirish 		nxge_unmap_rxdma_channel(nxgep, channel,
291444961713Sgirish 			rbr_rings[i],
291544961713Sgirish 			rcr_rings[i],
291644961713Sgirish 			rx_mbox_p[i]);
291744961713Sgirish 	}
291844961713Sgirish 
291944961713Sgirish 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
292044961713Sgirish 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
292144961713Sgirish 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
292244961713Sgirish 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
292344961713Sgirish 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
292444961713Sgirish 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
292544961713Sgirish 
292644961713Sgirish nxge_map_rxdma_exit:
292744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
292844961713Sgirish 		"<== nxge_map_rxdma: "
292944961713Sgirish 		"(status 0x%x channel %d)",
293044961713Sgirish 		status, channel));
293144961713Sgirish 
293244961713Sgirish 	return (status);
293344961713Sgirish }
293444961713Sgirish 
293544961713Sgirish static void
293644961713Sgirish nxge_unmap_rxdma(p_nxge_t nxgep)
293744961713Sgirish {
293844961713Sgirish 	int			i, ndmas;
293944961713Sgirish 	uint16_t		channel;
294044961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
294144961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
294244961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
294344961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
294444961713Sgirish 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
294544961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
294644961713Sgirish 	p_nxge_dma_pool_t	dma_buf_poolp;
294744961713Sgirish 	p_nxge_dma_pool_t	dma_cntl_poolp;
294844961713Sgirish 	p_nxge_dma_common_t	*dma_buf_p;
294944961713Sgirish 
295044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma"));
295144961713Sgirish 
295244961713Sgirish 	dma_buf_poolp = nxgep->rx_buf_pool_p;
295344961713Sgirish 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
295444961713Sgirish 
295544961713Sgirish 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
295644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
295744961713Sgirish 			"<== nxge_unmap_rxdma: NULL buf pointers"));
295844961713Sgirish 		return;
295944961713Sgirish 	}
296044961713Sgirish 
296144961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
296244961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
296344961713Sgirish 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
296444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
296544961713Sgirish 			"<== nxge_unmap_rxdma: NULL ring pointers"));
296644961713Sgirish 		return;
296744961713Sgirish 	}
296844961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
296944961713Sgirish 	if (!ndmas) {
297044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
297144961713Sgirish 			"<== nxge_unmap_rxdma: no channel"));
297244961713Sgirish 		return;
297344961713Sgirish 	}
297444961713Sgirish 
297544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
297644961713Sgirish 		"==> nxge_unmap_rxdma (ndmas %d)", ndmas));
297744961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
297844961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
297944961713Sgirish 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
298044961713Sgirish 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
298144961713Sgirish 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
298244961713Sgirish 
298344961713Sgirish 	for (i = 0; i < ndmas; i++) {
298444961713Sgirish 		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
298544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
298644961713Sgirish 			"==> nxge_unmap_rxdma (ndmas %d) channel %d",
298744961713Sgirish 				ndmas, channel));
298844961713Sgirish 		(void) nxge_unmap_rxdma_channel(nxgep, channel,
298944961713Sgirish 				(p_rx_rbr_ring_t)rbr_rings[i],
299044961713Sgirish 				(p_rx_rcr_ring_t)rcr_rings[i],
299144961713Sgirish 				(p_rx_mbox_t)rx_mbox_p[i]);
299244961713Sgirish 	}
299344961713Sgirish 
299444961713Sgirish 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
299544961713Sgirish 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
299644961713Sgirish 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
299744961713Sgirish 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
299844961713Sgirish 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
299944961713Sgirish 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
300044961713Sgirish 
300144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
300244961713Sgirish 		"<== nxge_unmap_rxdma"));
300344961713Sgirish }
300444961713Sgirish 
300544961713Sgirish nxge_status_t
300644961713Sgirish nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
300744961713Sgirish     p_nxge_dma_common_t *dma_buf_p,  p_rx_rbr_ring_t *rbr_p,
300844961713Sgirish     uint32_t num_chunks,
300944961713Sgirish     p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
301044961713Sgirish     p_rx_mbox_t *rx_mbox_p)
301144961713Sgirish {
301244961713Sgirish 	int	status = NXGE_OK;
301344961713Sgirish 
301444961713Sgirish 	/*
301544961713Sgirish 	 * Set up and prepare buffer blocks, descriptors
301644961713Sgirish 	 * and mailbox.
301744961713Sgirish 	 */
301844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
301944961713Sgirish 		"==> nxge_map_rxdma_channel (channel %d)", channel));
302044961713Sgirish 	/*
302144961713Sgirish 	 * Receive buffer blocks
302244961713Sgirish 	 */
302344961713Sgirish 	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
302444961713Sgirish 			dma_buf_p, rbr_p, num_chunks);
302544961713Sgirish 	if (status != NXGE_OK) {
302644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
302744961713Sgirish 			"==> nxge_map_rxdma_channel (channel %d): "
302844961713Sgirish 			"map buffer failed 0x%x", channel, status));
302944961713Sgirish 		goto nxge_map_rxdma_channel_exit;
303044961713Sgirish 	}
303144961713Sgirish 
303244961713Sgirish 	/*
303344961713Sgirish 	 * Receive block ring, completion ring and mailbox.
303444961713Sgirish 	 */
303544961713Sgirish 	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
303644961713Sgirish 			dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
303744961713Sgirish 	if (status != NXGE_OK) {
303844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
303944961713Sgirish 			"==> nxge_map_rxdma_channel (channel %d): "
304044961713Sgirish 			"map config failed 0x%x", channel, status));
304144961713Sgirish 		goto nxge_map_rxdma_channel_fail2;
304244961713Sgirish 	}
304344961713Sgirish 
304444961713Sgirish 	goto nxge_map_rxdma_channel_exit;
304544961713Sgirish 
304644961713Sgirish nxge_map_rxdma_channel_fail3:
304744961713Sgirish 	/* Free rbr, rcr */
304844961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
304944961713Sgirish 		"==> nxge_map_rxdma_channel: free rbr/rcr "
305044961713Sgirish 		"(status 0x%x channel %d)",
305144961713Sgirish 		status, channel));
305244961713Sgirish 	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
305344961713Sgirish 		*rcr_p, *rx_mbox_p);
305444961713Sgirish 
305544961713Sgirish nxge_map_rxdma_channel_fail2:
305644961713Sgirish 	/* Free buffer blocks */
305744961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
305844961713Sgirish 		"==> nxge_map_rxdma_channel: free rx buffers "
305944961713Sgirish 		"(nxgep 0x%x status 0x%x channel %d)",
306044961713Sgirish 		nxgep, status, channel));
306144961713Sgirish 	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
306244961713Sgirish 
306356d930aeSspeer 	status = NXGE_ERROR;
306456d930aeSspeer 
306544961713Sgirish nxge_map_rxdma_channel_exit:
306644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
306744961713Sgirish 		"<== nxge_map_rxdma_channel: "
306844961713Sgirish 		"(nxgep 0x%x status 0x%x channel %d)",
306944961713Sgirish 		nxgep, status, channel));
307044961713Sgirish 
307144961713Sgirish 	return (status);
307244961713Sgirish }
307344961713Sgirish 
307444961713Sgirish /*ARGSUSED*/
307544961713Sgirish static void
307644961713Sgirish nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
307744961713Sgirish     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
307844961713Sgirish {
307944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
308044961713Sgirish 		"==> nxge_unmap_rxdma_channel (channel %d)", channel));
308144961713Sgirish 
308244961713Sgirish 	/*
308344961713Sgirish 	 * unmap receive block ring, completion ring and mailbox.
308444961713Sgirish 	 */
308544961713Sgirish 	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
308644961713Sgirish 			rcr_p, rx_mbox_p);
308744961713Sgirish 
308844961713Sgirish 	/* unmap buffer blocks */
308944961713Sgirish 	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
309044961713Sgirish 
309144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
309244961713Sgirish }
309344961713Sgirish 
309444961713Sgirish /*ARGSUSED*/
309544961713Sgirish static nxge_status_t
309644961713Sgirish nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
309744961713Sgirish     p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
309844961713Sgirish     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
309944961713Sgirish {
310044961713Sgirish 	p_rx_rbr_ring_t 	rbrp;
310144961713Sgirish 	p_rx_rcr_ring_t 	rcrp;
310244961713Sgirish 	p_rx_mbox_t 		mboxp;
310344961713Sgirish 	p_nxge_dma_common_t 	cntl_dmap;
310444961713Sgirish 	p_nxge_dma_common_t 	dmap;
310544961713Sgirish 	p_rx_msg_t 		*rx_msg_ring;
310644961713Sgirish 	p_rx_msg_t 		rx_msg_p;
310744961713Sgirish 	p_rbr_cfig_a_t		rcfga_p;
310844961713Sgirish 	p_rbr_cfig_b_t		rcfgb_p;
310944961713Sgirish 	p_rcrcfig_a_t		cfga_p;
311044961713Sgirish 	p_rcrcfig_b_t		cfgb_p;
311144961713Sgirish 	p_rxdma_cfig1_t		cfig1_p;
311244961713Sgirish 	p_rxdma_cfig2_t		cfig2_p;
311344961713Sgirish 	p_rbr_kick_t		kick_p;
311444961713Sgirish 	uint32_t		dmaaddrp;
311544961713Sgirish 	uint32_t		*rbr_vaddrp;
311644961713Sgirish 	uint32_t		bkaddr;
311744961713Sgirish 	nxge_status_t		status = NXGE_OK;
311844961713Sgirish 	int			i;
311944961713Sgirish 	uint32_t 		nxge_port_rcr_size;
312044961713Sgirish 
312144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
312244961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring"));
312344961713Sgirish 
312444961713Sgirish 	cntl_dmap = *dma_cntl_p;
312544961713Sgirish 
312644961713Sgirish 	/* Map in the receive block ring */
312744961713Sgirish 	rbrp = *rbr_p;
312844961713Sgirish 	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
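	/*
	 * Carve the RBR descriptor area out of the control space:
	 * rbb_max entries, each a 4-byte shifted buffer block address.
	 */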
312944961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
313044961713Sgirish 	/*
313144961713Sgirish 	 * Zero out buffer block ring descriptors.
313244961713Sgirish 	 */
313344961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
313444961713Sgirish 
313544961713Sgirish 	rcfga_p = &(rbrp->rbr_cfga);
313644961713Sgirish 	rcfgb_p = &(rbrp->rbr_cfgb);
313744961713Sgirish 	kick_p = &(rbrp->rbr_kick);
313844961713Sgirish 	rcfga_p->value = 0;
313944961713Sgirish 	rcfgb_p->value = 0;
314044961713Sgirish 	kick_p->value = 0;
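	/*
	 * Stage RBR configuration A: the masked, aligned ring base
	 * address combined with the ring length (rbb_max).
	 */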
314144961713Sgirish 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
314244961713Sgirish 	rcfga_p->value = (rbrp->rbr_addr &
314344961713Sgirish 				(RBR_CFIG_A_STDADDR_MASK |
314444961713Sgirish 				RBR_CFIG_A_STDADDR_BASE_MASK));
314544961713Sgirish 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
314644961713Sgirish 
314744961713Sgirish 	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
314844961713Sgirish 	rcfgb_p->bits.ldw.vld0 = 1;
314944961713Sgirish 	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
315044961713Sgirish 	rcfgb_p->bits.ldw.vld1 = 1;
315144961713Sgirish 	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
315244961713Sgirish 	rcfgb_p->bits.ldw.vld2 = 1;
315344961713Sgirish 	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
315444961713Sgirish 
315544961713Sgirish 	/*
315644961713Sgirish 	 * For each buffer block, enter receive block address to the ring.
315744961713Sgirish 	 */
315844961713Sgirish 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
315944961713Sgirish 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
316044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
316144961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring: channel %d "
316244961713Sgirish 		"rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
316344961713Sgirish 
316444961713Sgirish 	rx_msg_ring = rbrp->rx_msg_ring;
316544961713Sgirish 	for (i = 0; i < rbrp->tnblocks; i++) {
316644961713Sgirish 		rx_msg_p = rx_msg_ring[i];
316744961713Sgirish 		rx_msg_p->nxgep = nxgep;
316844961713Sgirish 		rx_msg_p->rx_rbr_p = rbrp;
316944961713Sgirish 		bkaddr = (uint32_t)
317044961713Sgirish 			((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
317144961713Sgirish 				>> RBR_BKADDR_SHIFT));
317244961713Sgirish 		rx_msg_p->free = B_FALSE;
317344961713Sgirish 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
317444961713Sgirish 
317544961713Sgirish 		*rbr_vaddrp++ = bkaddr;
317644961713Sgirish 	}
317744961713Sgirish 
317844961713Sgirish 	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
317944961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
318044961713Sgirish 
318144961713Sgirish 	rbrp->rbr_rd_index = 0;
318244961713Sgirish 
318344961713Sgirish 	rbrp->rbr_consumed = 0;
318444961713Sgirish 	rbrp->rbr_use_bcopy = B_TRUE;
318544961713Sgirish 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
318644961713Sgirish 	/*
318744961713Sgirish 	 * Do bcopy on packets greater than bcopy size once
318844961713Sgirish 	 * the lo threshold is reached.
318944961713Sgirish 	 * This lo threshold should be less than the hi threshold.
319044961713Sgirish 	 *
319144961713Sgirish 	 * Do bcopy on every packet once the hi threshold is reached.
319244961713Sgirish 	 */
319344961713Sgirish 	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
319444961713Sgirish 		/* default it to use hi */
319544961713Sgirish 		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
319644961713Sgirish 	}
319744961713Sgirish 
319844961713Sgirish 	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
319944961713Sgirish 		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
320044961713Sgirish 	}
320144961713Sgirish 	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
320244961713Sgirish 
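	/*
	 * The hi/lo thresholds below are expressed as a fraction of the
	 * ring: bcopy starts once rbb_max * threshold / NXGE_RX_BCOPY_SCALE
	 * buffers have been consumed.
	 */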
320344961713Sgirish 	switch (nxge_rx_threshold_hi) {
320444961713Sgirish 	default:
320544961713Sgirish 	case	NXGE_RX_COPY_NONE:
320644961713Sgirish 		/* Do not do bcopy at all */
320744961713Sgirish 		rbrp->rbr_use_bcopy = B_FALSE;
320844961713Sgirish 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
320944961713Sgirish 		break;
321044961713Sgirish 
321144961713Sgirish 	case NXGE_RX_COPY_1:
321244961713Sgirish 	case NXGE_RX_COPY_2:
321344961713Sgirish 	case NXGE_RX_COPY_3:
321444961713Sgirish 	case NXGE_RX_COPY_4:
321544961713Sgirish 	case NXGE_RX_COPY_5:
321644961713Sgirish 	case NXGE_RX_COPY_6:
321744961713Sgirish 	case NXGE_RX_COPY_7:
321844961713Sgirish 		rbrp->rbr_threshold_hi =
321944961713Sgirish 			rbrp->rbb_max *
322044961713Sgirish 			(nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
322144961713Sgirish 		break;
322244961713Sgirish 
322344961713Sgirish 	case NXGE_RX_COPY_ALL:
322444961713Sgirish 		rbrp->rbr_threshold_hi = 0;
322544961713Sgirish 		break;
322644961713Sgirish 	}
322744961713Sgirish 
322844961713Sgirish 	switch (nxge_rx_threshold_lo) {
322944961713Sgirish 	default:
323044961713Sgirish 	case	NXGE_RX_COPY_NONE:
323144961713Sgirish 		/* Do not do bcopy at all */
323244961713Sgirish 		if (rbrp->rbr_use_bcopy) {
323344961713Sgirish 			rbrp->rbr_use_bcopy = B_FALSE;
323444961713Sgirish 		}
323544961713Sgirish 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
323644961713Sgirish 		break;
323744961713Sgirish 
323844961713Sgirish 	case NXGE_RX_COPY_1:
323944961713Sgirish 	case NXGE_RX_COPY_2:
324044961713Sgirish 	case NXGE_RX_COPY_3:
324144961713Sgirish 	case NXGE_RX_COPY_4:
324244961713Sgirish 	case NXGE_RX_COPY_5:
324344961713Sgirish 	case NXGE_RX_COPY_6:
324444961713Sgirish 	case NXGE_RX_COPY_7:
324544961713Sgirish 		rbrp->rbr_threshold_lo =
324644961713Sgirish 			rbrp->rbb_max *
324744961713Sgirish 			(nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
324844961713Sgirish 		break;
324944961713Sgirish 
325044961713Sgirish 	case NXGE_RX_COPY_ALL:
325144961713Sgirish 		rbrp->rbr_threshold_lo = 0;
325244961713Sgirish 		break;
325344961713Sgirish 	}
325444961713Sgirish 
325544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
325644961713Sgirish 		"nxge_map_rxdma_channel_cfg_ring: channel %d "
325744961713Sgirish 		"rbb_max %d "
325844961713Sgirish 		"rbrp->rbr_bufsize_type %d "
325944961713Sgirish 		"rbb_threshold_hi %d "
326044961713Sgirish 		"rbb_threshold_lo %d",
326144961713Sgirish 		dma_channel,
326244961713Sgirish 		rbrp->rbb_max,
326344961713Sgirish 		rbrp->rbr_bufsize_type,
326444961713Sgirish 		rbrp->rbr_threshold_hi,
326544961713Sgirish 		rbrp->rbr_threshold_lo));
326644961713Sgirish 
326744961713Sgirish 	rbrp->page_valid.value = 0;
326844961713Sgirish 	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
326944961713Sgirish 	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
327044961713Sgirish 	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
327144961713Sgirish 	rbrp->page_hdl.value = 0;
327244961713Sgirish 
327344961713Sgirish 	rbrp->page_valid.bits.ldw.page0 = 1;
327444961713Sgirish 	rbrp->page_valid.bits.ldw.page1 = 1;
327544961713Sgirish 
327644961713Sgirish 	/* Map in the receive completion ring */
327744961713Sgirish 	rcrp = (p_rx_rcr_ring_t)
327814ea4bb7Ssd 		KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
327944961713Sgirish 	rcrp->rdc = dma_channel;
328044961713Sgirish 
328144961713Sgirish 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
328244961713Sgirish 	rcrp->comp_size = nxge_port_rcr_size;
328344961713Sgirish 	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
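	/* comp_wrap_mask assumes nxge_port_rcr_size is a power of two. */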
328444961713Sgirish 
328544961713Sgirish 	rcrp->max_receive_pkts = nxge_max_rx_pkts;
328644961713Sgirish 
328744961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
328844961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
328944961713Sgirish 			sizeof (rcr_entry_t));
329044961713Sgirish 	rcrp->comp_rd_index = 0;
329144961713Sgirish 	rcrp->comp_wt_index = 0;
329244961713Sgirish 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
329344961713Sgirish 		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
329444961713Sgirish 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
329544961713Sgirish 		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
329644961713Sgirish 
329744961713Sgirish 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
329844961713Sgirish 			(nxge_port_rcr_size - 1);
329944961713Sgirish 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
330044961713Sgirish 			(nxge_port_rcr_size - 1);
330144961713Sgirish 
330244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
330344961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring: "
330444961713Sgirish 		"channel %d "
330544961713Sgirish 		"rbr_vaddrp $%p "
330644961713Sgirish 		"rcr_desc_rd_head_p $%p "
330744961713Sgirish 		"rcr_desc_rd_head_pp $%p "
330844961713Sgirish 		"rcr_desc_rd_last_p $%p "
330944961713Sgirish 		"rcr_desc_rd_last_pp $%p ",
331044961713Sgirish 		dma_channel,
331144961713Sgirish 		rbr_vaddrp,
331244961713Sgirish 		rcrp->rcr_desc_rd_head_p,
331344961713Sgirish 		rcrp->rcr_desc_rd_head_pp,
331444961713Sgirish 		rcrp->rcr_desc_last_p,
331544961713Sgirish 		rcrp->rcr_desc_last_pp));
331644961713Sgirish 
331744961713Sgirish 	/*
331844961713Sgirish 	 * Zero out buffer block ring descriptors.
331944961713Sgirish 	 */
332044961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
332114ea4bb7Ssd 	rcrp->intr_timeout = nxgep->intr_timeout;
332214ea4bb7Ssd 	rcrp->intr_threshold = nxgep->intr_threshold;
332344961713Sgirish 	rcrp->full_hdr_flag = B_FALSE;
332444961713Sgirish 	rcrp->sw_priv_hdr_len = 0;
332544961713Sgirish 
332644961713Sgirish 	cfga_p = &(rcrp->rcr_cfga);
332744961713Sgirish 	cfgb_p = &(rcrp->rcr_cfgb);
332844961713Sgirish 	cfga_p->value = 0;
332944961713Sgirish 	cfgb_p->value = 0;
333044961713Sgirish 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
333144961713Sgirish 	cfga_p->value = (rcrp->rcr_addr &
333244961713Sgirish 			    (RCRCFIG_A_STADDR_MASK |
333344961713Sgirish 			    RCRCFIG_A_STADDR_BASE_MASK));
333444961713Sgirish 
333544961713Sgirish 	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
333644961713Sgirish 				RCRCFIG_A_LEN_SHIF);
333744961713Sgirish 
333844961713Sgirish 	/*
333944961713Sgirish 	 * The timeout should be set based on the system clock divider.
334044961713Sgirish 	 * The default timeout value of 1 assumes a granularity of 1000
334144961713Sgirish 	 * clock cycles, roughly 3 microseconds at 300 MHz.
334244961713Sgirish 	 */
334314ea4bb7Ssd 	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
334414ea4bb7Ssd 	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
334544961713Sgirish 	cfgb_p->bits.ldw.entout = 1;
334644961713Sgirish 
334744961713Sgirish 	/* Map in the mailbox */
334844961713Sgirish 	mboxp = (p_rx_mbox_t)
334944961713Sgirish 			KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
335044961713Sgirish 	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
335144961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
335244961713Sgirish 	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
335344961713Sgirish 	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
335444961713Sgirish 	cfig1_p->value = cfig2_p->value = 0;
335544961713Sgirish 
335644961713Sgirish 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
335744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
335844961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring: "
335944961713Sgirish 		"channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
336044961713Sgirish 		dma_channel, cfig1_p->value, cfig2_p->value,
336144961713Sgirish 		mboxp->mbox_addr));
336244961713Sgirish 
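	/*
	 * Split the mailbox DMA address across the two config registers:
	 * CFIG1 takes the upper bits (address >> 32, low 12 bits) and
	 * CFIG2 takes the masked low word shifted per
	 * RXDMA_CFIG2_MBADDR_L_SHIFT.
	 */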
336344961713Sgirish 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
336444961713Sgirish 			& 0xfff);
336544961713Sgirish 	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
336644961713Sgirish 
336744961713Sgirish 
336944961713Sgirish 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
337044961713Sgirish 				RXDMA_CFIG2_MBADDR_L_MASK);
337144961713Sgirish 
337244961713Sgirish 	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
337344961713Sgirish 
337444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
337544961713Sgirish 		"==> nxge_map_rxdma_channel_cfg_ring: "
337644961713Sgirish 		"channel %d dmaaddrp $%p "
337744961713Sgirish 		"cfg1 0x%016llx cfig2 0x%016llx",
337844961713Sgirish 		dma_channel, dmaaddrp,
337944961713Sgirish 		cfig1_p->value, cfig2_p->value));
338044961713Sgirish 
338144961713Sgirish 	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
338244961713Sgirish 	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
338344961713Sgirish 
338444961713Sgirish 	rbrp->rx_rcr_p = rcrp;
338544961713Sgirish 	rcrp->rx_rbr_p = rbrp;
338644961713Sgirish 	*rcr_p = rcrp;
338744961713Sgirish 	*rx_mbox_p = mboxp;
338844961713Sgirish 
338944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
339044961713Sgirish 		"<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
339144961713Sgirish 
339244961713Sgirish 	return (status);
339344961713Sgirish }
339444961713Sgirish 
339544961713Sgirish /*ARGSUSED*/
339644961713Sgirish static void
339744961713Sgirish nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
339844961713Sgirish     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
339944961713Sgirish {
340044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
340144961713Sgirish 		"==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
340244961713Sgirish 		rcr_p->rdc));
340344961713Sgirish 
340444961713Sgirish 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
340544961713Sgirish 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
340644961713Sgirish 
340744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
340844961713Sgirish 		"<== nxge_unmap_rxdma_channel_cfg_ring"));
340944961713Sgirish }
341044961713Sgirish 
341144961713Sgirish static nxge_status_t
341244961713Sgirish nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
341344961713Sgirish     p_nxge_dma_common_t *dma_buf_p,
341444961713Sgirish     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
341544961713Sgirish {
341644961713Sgirish 	p_rx_rbr_ring_t 	rbrp;
341744961713Sgirish 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
341844961713Sgirish 	p_rx_msg_t 		*rx_msg_ring;
341944961713Sgirish 	p_rx_msg_t 		rx_msg_p;
342044961713Sgirish 	p_mblk_t 		mblk_p;
342144961713Sgirish 
342244961713Sgirish 	rxring_info_t *ring_info;
342344961713Sgirish 	nxge_status_t		status = NXGE_OK;
342444961713Sgirish 	int			i, j, index;
342544961713Sgirish 	uint32_t		size, bsize, nblocks, nmsgs;
342644961713Sgirish 
342744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
342844961713Sgirish 		"==> nxge_map_rxdma_channel_buf_ring: channel %d",
342944961713Sgirish 		channel));
343044961713Sgirish 
343144961713Sgirish 	dma_bufp = tmp_bufp = *dma_buf_p;
343244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
343344961713Sgirish 		" nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
343444961713Sgirish 		"chunks bufp 0x%016llx",
343544961713Sgirish 		channel, num_chunks, dma_bufp));
343644961713Sgirish 
343744961713Sgirish 	nmsgs = 0;
343844961713Sgirish 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
343944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
344044961713Sgirish 			"==> nxge_map_rxdma_channel_buf_ring: channel %d "
344144961713Sgirish 			"bufp 0x%016llx nblocks %d nmsgs %d",
344244961713Sgirish 			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
344344961713Sgirish 		nmsgs += tmp_bufp->nblocks;
344444961713Sgirish 	}
344544961713Sgirish 	if (!nmsgs) {
344656d930aeSspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
344744961713Sgirish 			"<== nxge_map_rxdma_channel_buf_ring: channel %d "
344844961713Sgirish 			"no msg blocks",
344944961713Sgirish 			channel));
345044961713Sgirish 		status = NXGE_ERROR;
345144961713Sgirish 		goto nxge_map_rxdma_channel_buf_ring_exit;
345244961713Sgirish 	}
345344961713Sgirish 
345444961713Sgirish 	rbrp = (p_rx_rbr_ring_t)
345544961713Sgirish 		KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
345644961713Sgirish 
345744961713Sgirish 	size = nmsgs * sizeof (p_rx_msg_t);
345844961713Sgirish 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
345944961713Sgirish 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
346044961713Sgirish 		KM_SLEEP);
346144961713Sgirish 
346244961713Sgirish 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
346344961713Sgirish 				(void *)nxgep->interrupt_cookie);
346444961713Sgirish 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
346544961713Sgirish 				(void *)nxgep->interrupt_cookie);
346644961713Sgirish 	rbrp->rdc = channel;
346744961713Sgirish 	rbrp->num_blocks = num_chunks;
346844961713Sgirish 	rbrp->tnblocks = nmsgs;
346944961713Sgirish 	rbrp->rbb_max = nmsgs;
347044961713Sgirish 	rbrp->rbr_max_size = nmsgs;
347144961713Sgirish 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
347244961713Sgirish 
347344961713Sgirish 	/*
347444961713Sgirish 	 * Buffer sizes suggested by NIU architect.
347544961713Sgirish 	 * 256, 512 and 2K.
347644961713Sgirish 	 */
347744961713Sgirish 
347844961713Sgirish 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
347944961713Sgirish 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
348044961713Sgirish 	rbrp->npi_pkt_buf_size0 = SIZE_256B;
348144961713Sgirish 
348244961713Sgirish 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
348344961713Sgirish 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
348444961713Sgirish 	rbrp->npi_pkt_buf_size1 = SIZE_1KB;
348544961713Sgirish 
348644961713Sgirish 	rbrp->block_size = nxgep->rx_default_block_size;
348744961713Sgirish 
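	/*
	 * The third buffer size depends on jumbo support: 2K for standard
	 * frames, otherwise 4K or 8K depending on the default block size.
	 */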
348814ea4bb7Ssd 	if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) {
348944961713Sgirish 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
349044961713Sgirish 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
349144961713Sgirish 		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
349244961713Sgirish 	} else {
349344961713Sgirish 		if (rbrp->block_size >= 0x2000) {
349444961713Sgirish 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
349544961713Sgirish 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
349644961713Sgirish 			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
349744961713Sgirish 		} else {
349844961713Sgirish 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
349944961713Sgirish 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
350044961713Sgirish 			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
350144961713Sgirish 		}
350244961713Sgirish 	}
350344961713Sgirish 
350444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
350544961713Sgirish 		"==> nxge_map_rxdma_channel_buf_ring: channel %d "
350644961713Sgirish 		"actual rbr max %d rbb_max %d nmsgs %d "
350744961713Sgirish 		"rbrp->block_size %d default_block_size %d "
350844961713Sgirish 		"(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
350944961713Sgirish 		channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
351044961713Sgirish 		rbrp->block_size, nxgep->rx_default_block_size,
351144961713Sgirish 		nxge_rbr_size, nxge_rbr_spare_size));
351244961713Sgirish 
351344961713Sgirish 	/* Map in buffers from the buffer pool.  */
351444961713Sgirish 	index = 0;
351544961713Sgirish 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
351644961713Sgirish 		bsize = dma_bufp->block_size;
351744961713Sgirish 		nblocks = dma_bufp->nblocks;
351844961713Sgirish 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
351944961713Sgirish 		ring_info->buffer[i].buf_index = i;
352044961713Sgirish 		ring_info->buffer[i].buf_size = dma_bufp->alength;
352144961713Sgirish 		ring_info->buffer[i].start_index = index;
352244961713Sgirish 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
352344961713Sgirish 
352444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
352544961713Sgirish 			" nxge_map_rxdma_channel_buf_ring: map channel %d "
352644961713Sgirish 			"chunk %d"
352744961713Sgirish 			" nblocks %d chunk_size %x block_size 0x%x "
352844961713Sgirish 			"dma_bufp $%p", channel, i,
352944961713Sgirish 			dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
353044961713Sgirish 			dma_bufp));
353144961713Sgirish 
353244961713Sgirish 		for (j = 0; j < nblocks; j++) {
353344961713Sgirish 			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
353444961713Sgirish 					dma_bufp)) == NULL) {
353556d930aeSspeer 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
353656d930aeSspeer 					"allocb failed (index %d i %d j %d)",
353756d930aeSspeer 					index, i, j));
353856d930aeSspeer 				goto nxge_map_rxdma_channel_buf_ring_fail1;
353944961713Sgirish 			}
354044961713Sgirish 			rx_msg_ring[index] = rx_msg_p;
354144961713Sgirish 			rx_msg_p->block_index = index;
354244961713Sgirish 			rx_msg_p->shifted_addr = (uint32_t)
354344961713Sgirish 				((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
354444961713Sgirish 					    RBR_BKADDR_SHIFT));
354544961713Sgirish 
354644961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
354756d930aeSspeer 				"index %d j %d rx_msg_p $%p mblk %p",
354856d930aeSspeer 				index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
354944961713Sgirish 
355044961713Sgirish 			mblk_p = rx_msg_p->rx_mblk_p;
355144961713Sgirish 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
355244961713Sgirish 			index++;
355344961713Sgirish 			rx_msg_p->buf_dma.dma_channel = channel;
355444961713Sgirish 		}
355544961713Sgirish 	}
355644961713Sgirish 	if (i < rbrp->num_blocks) {
355744961713Sgirish 		goto nxge_map_rxdma_channel_buf_ring_fail1;
355844961713Sgirish 	}
355944961713Sgirish 
356044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
356144961713Sgirish 		"nxge_map_rxdma_channel_buf_ring: done buf init "
356244961713Sgirish 			"channel %d msg block entries %d",
356344961713Sgirish 			channel, index));
356444961713Sgirish 	ring_info->block_size_mask = bsize - 1;
356544961713Sgirish 	rbrp->rx_msg_ring = rx_msg_ring;
356644961713Sgirish 	rbrp->dma_bufp = dma_buf_p;
356744961713Sgirish 	rbrp->ring_info = ring_info;
356844961713Sgirish 
356944961713Sgirish 	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
357044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
357144961713Sgirish 		" nxge_map_rxdma_channel_buf_ring: "
357244961713Sgirish 		"channel %d done buf info init", channel));
357344961713Sgirish 
357444961713Sgirish 	*rbr_p = rbrp;
357544961713Sgirish 	goto nxge_map_rxdma_channel_buf_ring_exit;
357644961713Sgirish 
357744961713Sgirish nxge_map_rxdma_channel_buf_ring_fail1:
357844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
357944961713Sgirish 		" nxge_map_rxdma_channel_buf_ring: failed channel %d (status 0x%x)",
358044961713Sgirish 		channel, status));
358144961713Sgirish 
358244961713Sgirish 	index--;
358344961713Sgirish 	for (; index >= 0; index--) {
358444961713Sgirish 		rx_msg_p = rx_msg_ring[index];
358544961713Sgirish 		if (rx_msg_p != NULL) {
358614ea4bb7Ssd 			freeb(rx_msg_p->rx_mblk_p);
358744961713Sgirish 			rx_msg_ring[index] = NULL;
358844961713Sgirish 		}
358944961713Sgirish 	}
359044961713Sgirish nxge_map_rxdma_channel_buf_ring_fail:
359144961713Sgirish 	MUTEX_DESTROY(&rbrp->post_lock);
359244961713Sgirish 	MUTEX_DESTROY(&rbrp->lock);
359344961713Sgirish 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
359444961713Sgirish 	KMEM_FREE(rx_msg_ring, size);
359544961713Sgirish 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
359644961713Sgirish 
359756d930aeSspeer 	status = NXGE_ERROR;
359856d930aeSspeer 
359944961713Sgirish nxge_map_rxdma_channel_buf_ring_exit:
360044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
360144961713Sgirish 		"<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
360244961713Sgirish 
360344961713Sgirish 	return (status);
360444961713Sgirish }
360544961713Sgirish 
360644961713Sgirish /*ARGSUSED*/
360744961713Sgirish static void
360844961713Sgirish nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
360944961713Sgirish     p_rx_rbr_ring_t rbr_p)
361044961713Sgirish {
361144961713Sgirish 	p_rx_msg_t 		*rx_msg_ring;
361244961713Sgirish 	p_rx_msg_t 		rx_msg_p;
361344961713Sgirish 	rxring_info_t 		*ring_info;
361444961713Sgirish 	int			i;
361544961713Sgirish 	uint32_t		size;
361644961713Sgirish #ifdef	NXGE_DEBUG
361744961713Sgirish 	int			num_chunks;
361844961713Sgirish #endif
361944961713Sgirish 
362044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
362144961713Sgirish 		"==> nxge_unmap_rxdma_channel_buf_ring"));
362244961713Sgirish 	if (rbr_p == NULL) {
362344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
362444961713Sgirish 			"<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
362544961713Sgirish 		return;
362644961713Sgirish 	}
362744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
362844961713Sgirish 		"==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
362944961713Sgirish 		rbr_p->rdc));
363044961713Sgirish 
363144961713Sgirish 	rx_msg_ring = rbr_p->rx_msg_ring;
363244961713Sgirish 	ring_info = rbr_p->ring_info;
363344961713Sgirish 
363444961713Sgirish 	if (rx_msg_ring == NULL || ring_info == NULL) {
363544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
363644961713Sgirish 			"<== nxge_unmap_rxdma_channel_buf_ring: "
363744961713Sgirish 			"rx_msg_ring $%p ring_info $%p",
363844961713Sgirish 			rx_msg_ring, ring_info));
363944961713Sgirish 		return;
364044961713Sgirish 	}
364144961713Sgirish 
364244961713Sgirish #ifdef	NXGE_DEBUG
364344961713Sgirish 	num_chunks = rbr_p->num_blocks;
364444961713Sgirish #endif
364544961713Sgirish 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
364644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
364744961713Sgirish 		" nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
364844961713Sgirish 		"tnblocks %d (max %d) size ptrs %d ",
364944961713Sgirish 		rbr_p->rdc, num_chunks,
365044961713Sgirish 		rbr_p->tnblocks, rbr_p->rbr_max_size, size));
365144961713Sgirish 
365244961713Sgirish 	for (i = 0; i < rbr_p->tnblocks; i++) {
365344961713Sgirish 		rx_msg_p = rx_msg_ring[i];
365444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
365544961713Sgirish 			" nxge_unmap_rxdma_channel_buf_ring: "
365644961713Sgirish 			"rx_msg_p $%p",
365744961713Sgirish 			rx_msg_p));
365844961713Sgirish 		if (rx_msg_p != NULL) {
365914ea4bb7Ssd 			freeb(rx_msg_p->rx_mblk_p);
366044961713Sgirish 			rx_msg_ring[i] = NULL;
366144961713Sgirish 		}
366244961713Sgirish 	}
366344961713Sgirish 
366444961713Sgirish 	MUTEX_DESTROY(&rbr_p->post_lock);
366544961713Sgirish 	MUTEX_DESTROY(&rbr_p->lock);
366644961713Sgirish 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
366744961713Sgirish 	KMEM_FREE(rx_msg_ring, size);
366844961713Sgirish 	KMEM_FREE(rbr_p, sizeof (rx_rbr_ring_t));
366944961713Sgirish 
367044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
367144961713Sgirish 		"<== nxge_unmap_rxdma_channel_buf_ring"));
367244961713Sgirish }
367344961713Sgirish 
367444961713Sgirish static nxge_status_t
367544961713Sgirish nxge_rxdma_hw_start_common(p_nxge_t nxgep)
367644961713Sgirish {
367744961713Sgirish 	nxge_status_t		status = NXGE_OK;
367844961713Sgirish 
367944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
368044961713Sgirish 
368144961713Sgirish 	/*
368244961713Sgirish 	 * Load the sharable parameters by writing to the
368344961713Sgirish 	 * function zero control registers. These FZC registers
368444961713Sgirish 	 * should be initialized only once for the entire chip.
368544961713Sgirish 	 */
368644961713Sgirish 	(void) nxge_init_fzc_rx_common(nxgep);
368744961713Sgirish 
368844961713Sgirish 	/*
368944961713Sgirish 	 * Initialize the RXDMA port specific FZC control configurations.
369044961713Sgirish 	 * These FZC registers are pertaining to each port.
369144961713Sgirish 	 */
369244961713Sgirish 	(void) nxge_init_fzc_rxdma_port(nxgep);
369344961713Sgirish 
369444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
369544961713Sgirish 
369644961713Sgirish 	return (status);
369744961713Sgirish }
369844961713Sgirish 
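/*
 * Illustrative bring-up ordering (a sketch only; the actual caller
 * lives elsewhere in the driver and may differ): the shared FZC state
 * is programmed once, and only then is each allocated RDC started
 * individually, e.g.
 *
 *	if (nxge_rxdma_hw_start_common(nxgep) == NXGE_OK)
 *		status = nxge_rxdma_hw_start(nxgep);
 */
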
369944961713Sgirish /*ARGSUSED*/
370044961713Sgirish static void
370144961713Sgirish nxge_rxdma_hw_stop_common(p_nxge_t nxgep)
370244961713Sgirish {
370344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common"));
370444961713Sgirish 
370544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop_common"));
370644961713Sgirish }
370744961713Sgirish 
370844961713Sgirish static nxge_status_t
370944961713Sgirish nxge_rxdma_hw_start(p_nxge_t nxgep)
371044961713Sgirish {
371144961713Sgirish 	int			i, ndmas;
371244961713Sgirish 	uint16_t		channel;
371344961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
371444961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
371544961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
371644961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
371744961713Sgirish 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
371844961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
371944961713Sgirish 	nxge_status_t		status = NXGE_OK;
372044961713Sgirish 
372144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
372244961713Sgirish 
372344961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
372444961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
372544961713Sgirish 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
372644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
372744961713Sgirish 			"<== nxge_rxdma_hw_start: NULL ring pointers"));
372844961713Sgirish 		return (NXGE_ERROR);
372944961713Sgirish 	}
373044961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
373144961713Sgirish 	if (ndmas == 0) {
373244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
373344961713Sgirish 			"<== nxge_rxdma_hw_start: no dma channel allocated"));
373444961713Sgirish 		return (NXGE_ERROR);
373544961713Sgirish 	}
373644961713Sgirish 
373744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
373844961713Sgirish 		"==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
373944961713Sgirish 
374044961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
374144961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
374244961713Sgirish 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
374344961713Sgirish 	if (rx_mbox_areas_p) {
374444961713Sgirish 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
374544961713Sgirish 	}
374644961713Sgirish 
374744961713Sgirish 	for (i = 0; i < ndmas; i++) {
374844961713Sgirish 		channel = rbr_rings[i]->rdc;
374944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
375044961713Sgirish 			"==> nxge_rxdma_hw_start (ndmas %d) channel %d",
375144961713Sgirish 				ndmas, channel));
375244961713Sgirish 		status = nxge_rxdma_start_channel(nxgep, channel,
375344961713Sgirish 				(p_rx_rbr_ring_t)rbr_rings[i],
375444961713Sgirish 				(p_rx_rcr_ring_t)rcr_rings[i],
375544961713Sgirish 				(p_rx_mbox_t)rx_mbox_p[i]);
375644961713Sgirish 		if (status != NXGE_OK) {
375744961713Sgirish 			goto nxge_rxdma_hw_start_fail1;
375844961713Sgirish 		}
375944961713Sgirish 	}
376044961713Sgirish 
376144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
376244961713Sgirish 		"rx_rbr_rings 0x%016llx rings 0x%016llx",
376344961713Sgirish 		rx_rbr_rings, rx_rcr_rings));
376444961713Sgirish 
376544961713Sgirish 	goto nxge_rxdma_hw_start_exit;
376644961713Sgirish 
376744961713Sgirish nxge_rxdma_hw_start_fail1:
376844961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
376944961713Sgirish 		"==> nxge_rxdma_hw_start: disable "
377044961713Sgirish 		"(status 0x%x channel %d i %d)", status, channel, i));
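	/*
	 * Unwind: stop every channel started so far, including the
	 * channel whose bring-up just failed part-way through.
	 */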
377144961713Sgirish 	for (; i >= 0; i--) {
377244961713Sgirish 		channel = rbr_rings[i]->rdc;
377344961713Sgirish 		(void) nxge_rxdma_stop_channel(nxgep, channel);
377444961713Sgirish 	}
377544961713Sgirish 
377644961713Sgirish nxge_rxdma_hw_start_exit:
377744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
377844961713Sgirish 		"<== nxge_rxdma_hw_start: (status 0x%x)", status));
377944961713Sgirish 
378044961713Sgirish 	return (status);
378144961713Sgirish }
378244961713Sgirish 
378344961713Sgirish static void
378444961713Sgirish nxge_rxdma_hw_stop(p_nxge_t nxgep)
378544961713Sgirish {
378644961713Sgirish 	int			i, ndmas;
378744961713Sgirish 	uint16_t		channel;
378844961713Sgirish 	p_rx_rbr_rings_t 	rx_rbr_rings;
378944961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
379044961713Sgirish 	p_rx_rcr_rings_t 	rx_rcr_rings;
379144961713Sgirish 
379244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
379344961713Sgirish 
379444961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
379544961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
379644961713Sgirish 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
379744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
379844961713Sgirish 			"<== nxge_rxdma_hw_stop: NULL ring pointers"));
379944961713Sgirish 		return;
380044961713Sgirish 	}
380144961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
380244961713Sgirish 	if (!ndmas) {
380344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
380444961713Sgirish 			"<== nxge_rxdma_hw_stop: no dma channel allocated"));
380544961713Sgirish 		return;
380644961713Sgirish 	}
380744961713Sgirish 
380844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
380944961713Sgirish 		"==> nxge_rxdma_hw_stop (ndmas %d)", ndmas));
381044961713Sgirish 
381144961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
381244961713Sgirish 
381344961713Sgirish 	for (i = 0; i < ndmas; i++) {
381444961713Sgirish 		channel = rbr_rings[i]->rdc;
381544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
381644961713Sgirish 			"==> nxge_rxdma_hw_stop (ndmas %d) channel %d",
381744961713Sgirish 				ndmas, channel));
381844961713Sgirish 		(void) nxge_rxdma_stop_channel(nxgep, channel);
381944961713Sgirish 	}
382044961713Sgirish 
382144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
382244961713Sgirish 		"rx_rbr_rings 0x%016llx rings 0x%016llx",
382344961713Sgirish 		rx_rbr_rings, rx_rcr_rings));
382444961713Sgirish 
382544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
382644961713Sgirish }
382744961713Sgirish 
382844961713Sgirish 
382944961713Sgirish static nxge_status_t
383044961713Sgirish nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
383144961713Sgirish     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
383244961713Sgirish 
383344961713Sgirish {
383444961713Sgirish 	npi_handle_t		handle;
383544961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
383644961713Sgirish 	rx_dma_ctl_stat_t	cs;
383744961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
383844961713Sgirish 	nxge_status_t		status = NXGE_OK;
383944961713Sgirish 
384044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
384144961713Sgirish 
384244961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
384344961713Sgirish 
384444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
384544961713Sgirish 		"npi handle addr $%p acc $%p",
384644961713Sgirish 		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
384744961713Sgirish 
384844961713Sgirish 	/* Reset RXDMA channel */
384944961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
385044961713Sgirish 	if (rs != NPI_SUCCESS) {
385144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
385244961713Sgirish 			"==> nxge_rxdma_start_channel: "
385344961713Sgirish 			"reset rxdma failed (0x%08x channel %d)",
385444961713Sgirish 			rs, channel));
385544961713Sgirish 		return (NXGE_ERROR | rs);
385644961713Sgirish 	}
385744961713Sgirish 
385844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
385944961713Sgirish 		"==> nxge_rxdma_start_channel: reset done: channel %d",
386044961713Sgirish 		channel));
386144961713Sgirish 
386244961713Sgirish 	/*
386344961713Sgirish 	 * Initialize the RXDMA channel specific FZC control
386444961713Sgirish 	 * configurations. These FZC registers pertain to
386544961713Sgirish 	 * each RX channel (logical pages).
386644961713Sgirish 	 */
386744961713Sgirish 	status = nxge_init_fzc_rxdma_channel(nxgep,
386844961713Sgirish 			channel, rbr_p, rcr_p, mbox_p);
386944961713Sgirish 	if (status != NXGE_OK) {
387044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
387144961713Sgirish 			"==> nxge_rxdma_start_channel: "
387244961713Sgirish 			"init fzc rxdma failed (0x%08x channel %d)",
387344961713Sgirish 			status, channel));
387444961713Sgirish 		return (status);
387544961713Sgirish 	}
387644961713Sgirish 
387744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
387844961713Sgirish 		"==> nxge_rxdma_start_channel: fzc done"));
387944961713Sgirish 
388044961713Sgirish 	/*
388144961713Sgirish 	 * Zero out the shadow and prefetch ram.
388244961713Sgirish 	 */
388344961713Sgirish 
388444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
388544961713Sgirish 		"ram done"));
388644961713Sgirish 
388744961713Sgirish 	/* Set up the interrupt event masks. */
388844961713Sgirish 	ent_mask.value = 0;
388944961713Sgirish 	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
389044961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
389144961713Sgirish 			&ent_mask);
389244961713Sgirish 	if (rs != NPI_SUCCESS) {
389344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
389444961713Sgirish 			"==> nxge_rxdma_start_channel: "
389544961713Sgirish 			"init rxdma event masks failed (0x%08x channel %d)",
389644961713Sgirish 			rs, channel));
389744961713Sgirish 		return (NXGE_ERROR | rs);
389844961713Sgirish 	}
389944961713Sgirish 
390044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
390144961713Sgirish 		"event done: channel %d (mask 0x%016llx)",
390244961713Sgirish 		channel, ent_mask.value));
390344961713Sgirish 
390444961713Sgirish 	/* Initialize the receive DMA control and status register */
390544961713Sgirish 	cs.value = 0;
390644961713Sgirish 	cs.bits.hdw.mex = 1;
390744961713Sgirish 	cs.bits.hdw.rcrthres = 1;
390844961713Sgirish 	cs.bits.hdw.rcrto = 1;
390944961713Sgirish 	cs.bits.hdw.rbr_empty = 1;
391044961713Sgirish 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
391144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
391244961713Sgirish 		"channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
391344961713Sgirish 	if (status != NXGE_OK) {
391444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
391544961713Sgirish 			"==> nxge_rxdma_start_channel: "
391644961713Sgirish 			"init rxdma control register failed (0x%08x channel %d)",
391744961713Sgirish 			status, channel));
391844961713Sgirish 		return (status);
391944961713Sgirish 	}
392044961713Sgirish 
392144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
392244961713Sgirish 		"control done - channel %d cs 0x%016llx", channel, cs.value));
392344961713Sgirish 
392444961713Sgirish 	/*
392544961713Sgirish 	 * Load the RXDMA descriptors, buffers and mailbox,
392644961713Sgirish 	 * initialize the receive DMA channel and
392744961713Sgirish 	 * enable it.
392844961713Sgirish 	 */
392944961713Sgirish 	status = nxge_enable_rxdma_channel(nxgep,
393044961713Sgirish 			channel, rbr_p, rcr_p, mbox_p);
393144961713Sgirish 
393244961713Sgirish 	if (status != NXGE_OK) {
393344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
393444961713Sgirish 			    " nxge_rxdma_start_channel: "
393544961713Sgirish 			    " init enable rxdma failed (0x%08x channel %d)",
393644961713Sgirish 			    status, channel));
393744961713Sgirish 		return (status);
393844961713Sgirish 	}
393944961713Sgirish 
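	/*
	 * With the channel enabled, reprogram the event mask so that
	 * only the WRED-drop and port-drop-packet events are masked;
	 * everything else, including RBR empty, is left unmasked.
	 */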
394044961713Sgirish 	ent_mask.value = 0;
394144961713Sgirish 	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
394244961713Sgirish 				RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
394344961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
394444961713Sgirish 			&ent_mask);
394544961713Sgirish 	if (rs != NPI_SUCCESS) {
394644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
394744961713Sgirish 			"==> nxge_rxdma_start_channel: "
394844961713Sgirish 			"final rxdma event mask update failed "
394944961713Sgirish 			"(0x%08x channel %d)", rs, channel));
395044961713Sgirish 		return (NXGE_ERROR | rs);
395144961713Sgirish 	}
395244961713Sgirish 
395344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
395444961713Sgirish 		"control done - channel %d cs 0x%016llx", channel, cs.value));
395544961713Sgirish 
395644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
395744961713Sgirish 		"==> nxge_rxdma_start_channel: enable done"));
395844961713Sgirish 
395944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
396044961713Sgirish 
396144961713Sgirish 	return (NXGE_OK);
396244961713Sgirish }
396344961713Sgirish 
396444961713Sgirish static nxge_status_t
396544961713Sgirish nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
396644961713Sgirish {
396744961713Sgirish 	npi_handle_t		handle;
396844961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
396944961713Sgirish 	rx_dma_ctl_stat_t	cs;
397044961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
397144961713Sgirish 	nxge_status_t		status = NXGE_OK;
397244961713Sgirish 
397344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
397444961713Sgirish 
397544961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
397644961713Sgirish 
397744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
397844961713Sgirish 		"npi handle addr $%p acc $%p",
397944961713Sgirish 		nxgep->npi_handle.regp, nxgep->npi_handle.regh));
398044961713Sgirish 
398144961713Sgirish 	/* Reset RXDMA channel */
398244961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
398344961713Sgirish 	if (rs != NPI_SUCCESS) {
398444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
398544961713Sgirish 			    " nxge_rxdma_stop_channel: "
398644961713Sgirish 			    " reset rxdma failed (0x%08x channel %d)",
398744961713Sgirish 			    rs, channel));
398844961713Sgirish 		return (NXGE_ERROR | rs);
398944961713Sgirish 	}
399044961713Sgirish 
399144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
399244961713Sgirish 		"==> nxge_rxdma_stop_channel: reset done"));
399344961713Sgirish 
399444961713Sgirish 	/* Set up the interrupt event masks. */
399544961713Sgirish 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
399644961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
399744961713Sgirish 			&ent_mask);
399844961713Sgirish 	if (rs != NPI_SUCCESS) {
399944961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
400044961713Sgirish 			    "==> nxge_rxdma_stop_channel: "
400144961713Sgirish 			    "set rxdma event masks failed (0x%08x channel %d)",
400244961713Sgirish 			    rs, channel));
400344961713Sgirish 		return (NXGE_ERROR | rs);
400444961713Sgirish 	}
400544961713Sgirish 
400644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
400744961713Sgirish 		"==> nxge_rxdma_stop_channel: event done"));
400844961713Sgirish 
400944961713Sgirish 	/* Initialize the receive DMA control and status register */
401044961713Sgirish 	cs.value = 0;
401144961713Sgirish 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel,
401244961713Sgirish 			&cs);
401344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
401444961713Sgirish 		" to default (all 0s) 0x%016llx", cs.value));
401544961713Sgirish 	if (status != NXGE_OK) {
401644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
401744961713Sgirish 			    " nxge_rxdma_stop_channel: init rxdma"
401844961713Sgirish 			    " control register failed (0x%08x channel %d)",
401944961713Sgirish 			status, channel));
402044961713Sgirish 		return (status);
402144961713Sgirish 	}
402244961713Sgirish 
402344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
402444961713Sgirish 		"==> nxge_rxdma_stop_channel: control done"));
402544961713Sgirish 
402644961713Sgirish 	/* disable dma channel */
402744961713Sgirish 	status = nxge_disable_rxdma_channel(nxgep, channel);
402844961713Sgirish 
402944961713Sgirish 	if (status != NXGE_OK) {
403044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
403144961713Sgirish 			    " nxge_rxdma_stop_channel: "
403244961713Sgirish 			    " init enable rxdma failed (0x%08x channel %d)",
403344961713Sgirish 			    status, channel));
403444961713Sgirish 		return (status);
403544961713Sgirish 	}
403644961713Sgirish 
403744961713Sgirish 	NXGE_DEBUG_MSG((nxgep,
403844961713Sgirish 		RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
403944961713Sgirish 
404044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
404144961713Sgirish 
404244961713Sgirish 	return (NXGE_OK);
404344961713Sgirish }
404444961713Sgirish 
404544961713Sgirish nxge_status_t
404644961713Sgirish nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
404744961713Sgirish {
404844961713Sgirish 	npi_handle_t		handle;
404944961713Sgirish 	p_nxge_rdc_sys_stats_t	statsp;
405044961713Sgirish 	rx_ctl_dat_fifo_stat_t	stat;
405144961713Sgirish 	uint32_t		zcp_err_status;
405244961713Sgirish 	uint32_t		ipp_err_status;
405344961713Sgirish 	nxge_status_t		status = NXGE_OK;
405444961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
405544961713Sgirish 	boolean_t		my_err = B_FALSE;
405644961713Sgirish 
405744961713Sgirish 	handle = nxgep->npi_handle;
405844961713Sgirish 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
405944961713Sgirish 
406044961713Sgirish 	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
406144961713Sgirish 
406244961713Sgirish 	if (rs != NPI_SUCCESS)
406344961713Sgirish 		return (NXGE_ERROR | rs);
406444961713Sgirish 
406544961713Sgirish 	if (stat.bits.ldw.id_mismatch) {
406644961713Sgirish 		statsp->id_mismatch++;
406744961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
406844961713Sgirish 					NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
406944961713Sgirish 		/* Global fatal error encountered */
407044961713Sgirish 	}
407144961713Sgirish 
407244961713Sgirish 	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
407344961713Sgirish 		switch (nxgep->mac.portnum) {
407444961713Sgirish 		case 0:
407544961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
407644961713Sgirish 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
407744961713Sgirish 				my_err = B_TRUE;
407844961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
407944961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
408044961713Sgirish 			}
408144961713Sgirish 			break;
408244961713Sgirish 		case 1:
408344961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
408444961713Sgirish 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
408544961713Sgirish 				my_err = B_TRUE;
408644961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
408744961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
408844961713Sgirish 			}
408944961713Sgirish 			break;
409044961713Sgirish 		case 2:
409144961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
409244961713Sgirish 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
409344961713Sgirish 				my_err = B_TRUE;
409444961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
409544961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
409644961713Sgirish 			}
409744961713Sgirish 			break;
409844961713Sgirish 		case 3:
409944961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
410044961713Sgirish 				(stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
410144961713Sgirish 				my_err = B_TRUE;
410244961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
410344961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
410444961713Sgirish 			}
410544961713Sgirish 			break;
410644961713Sgirish 		default:
410744961713Sgirish 			return (NXGE_ERROR);
410844961713Sgirish 		}
410944961713Sgirish 	}
411044961713Sgirish 
411144961713Sgirish 	if (my_err) {
411244961713Sgirish 		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
411344961713Sgirish 							zcp_err_status);
411444961713Sgirish 		if (status != NXGE_OK)
411544961713Sgirish 			return (status);
411644961713Sgirish 	}
411744961713Sgirish 
411844961713Sgirish 	return (NXGE_OK);
411944961713Sgirish }
412044961713Sgirish 
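/*
 * The per-port switch above tests this port's bit in the ZCP/IPP EOP
 * error fields.  Assuming the FIFO_EOP_PORTn masks are simply (1 << n),
 * which is what nxge_rxdma_handle_port_errors() relies on with its
 * (0x1 << portn) checks, the same decision could be written as one
 * expression -- a sketch only, not the code used above:
 *
 *	if ((stat.bits.ldw.zcp_eop_err | stat.bits.ldw.ipp_eop_err) &
 *	    (0x1 << nxgep->mac.portnum)) {
 *		my_err = B_TRUE;
 *		zcp_err_status = stat.bits.ldw.zcp_eop_err;
 *		ipp_err_status = stat.bits.ldw.ipp_eop_err;
 *	}
 */
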
412144961713Sgirish static nxge_status_t
412244961713Sgirish nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
412344961713Sgirish 							uint32_t zcp_status)
412444961713Sgirish {
412544961713Sgirish 	boolean_t		rxport_fatal = B_FALSE;
412644961713Sgirish 	p_nxge_rdc_sys_stats_t	statsp;
412744961713Sgirish 	nxge_status_t		status = NXGE_OK;
412844961713Sgirish 	uint8_t			portn;
412944961713Sgirish 
413044961713Sgirish 	portn = nxgep->mac.portnum;
413144961713Sgirish 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
413244961713Sgirish 
413344961713Sgirish 	if (ipp_status & (0x1 << portn)) {
413444961713Sgirish 		statsp->ipp_eop_err++;
413544961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
413644961713Sgirish 					NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
413744961713Sgirish 		rxport_fatal = B_TRUE;
413844961713Sgirish 	}
413944961713Sgirish 
414044961713Sgirish 	if (zcp_status & (0x1 << portn)) {
414144961713Sgirish 		statsp->zcp_eop_err++;
414244961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
414344961713Sgirish 					NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
414444961713Sgirish 		rxport_fatal = B_TRUE;
414544961713Sgirish 	}
414644961713Sgirish 
414744961713Sgirish 	if (rxport_fatal) {
414844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
414944961713Sgirish 			    " nxge_rxdma_handle_port_errors: "
415044961713Sgirish 			    " fatal error on Port #%d\n",
415144961713Sgirish 				portn));
415244961713Sgirish 		status = nxge_rx_port_fatal_err_recover(nxgep);
415344961713Sgirish 		if (status == NXGE_OK) {
415444961713Sgirish 			FM_SERVICE_RESTORED(nxgep);
415544961713Sgirish 		}
415644961713Sgirish 	}
415744961713Sgirish 
415844961713Sgirish 	return (status);
415944961713Sgirish }
416044961713Sgirish 
416144961713Sgirish static nxge_status_t
416244961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
416344961713Sgirish {
416444961713Sgirish 	npi_handle_t		handle;
416544961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
416644961713Sgirish 	nxge_status_t		status = NXGE_OK;
416744961713Sgirish 	p_rx_rbr_ring_t		rbrp;
416844961713Sgirish 	p_rx_rcr_ring_t		rcrp;
416944961713Sgirish 	p_rx_mbox_t		mboxp;
417044961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
417144961713Sgirish 	p_nxge_dma_common_t	dmap;
417244961713Sgirish 	int			ring_idx;
417344961713Sgirish 	uint32_t		ref_cnt;
417444961713Sgirish 	p_rx_msg_t		rx_msg_p;
417544961713Sgirish 	int			i;
417644961713Sgirish 	uint32_t		nxge_port_rcr_size;
417744961713Sgirish 
417844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
417944961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
418044961713Sgirish 			"Recovering from RxDMAChannel#%d error...", channel));
418144961713Sgirish 
418244961713Sgirish 	/*
418344961713Sgirish 	 * Stop the dma channel and wait for the stop-done bit.
418444961713Sgirish 	 * If the stop-done bit is not set, then flag
418544961713Sgirish 	 * an error.
418644961713Sgirish 	 */
418744961713Sgirish 
418844961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
418944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
419044961713Sgirish 
419144961713Sgirish 	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
419244961713Sgirish 	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
419344961713Sgirish 	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];
419444961713Sgirish 
419544961713Sgirish 	MUTEX_ENTER(&rcrp->lock);
419644961713Sgirish 	MUTEX_ENTER(&rbrp->lock);
419744961713Sgirish 	MUTEX_ENTER(&rbrp->post_lock);
419844961713Sgirish 
419944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
420044961713Sgirish 
420144961713Sgirish 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
420244961713Sgirish 	if (rs != NPI_SUCCESS) {
420344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
420444961713Sgirish 			"nxge_rxdma_fatal_err_recover: disable rxdma failed"));
420544961713Sgirish 		goto fail;
420644961713Sgirish 	}
420744961713Sgirish 
420844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
420944961713Sgirish 
421044961713Sgirish 	/* Disable interrupt */
421144961713Sgirish 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
421244961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
421344961713Sgirish 	if (rs != NPI_SUCCESS) {
421444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
421544961713Sgirish 				"nxge_rxdma_fatal_err_recover: "
421644961713Sgirish 				"set rxdma event masks failed (channel %d)",
421744961713Sgirish 				channel));
421844961713Sgirish 	}
421944961713Sgirish 
422044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
422144961713Sgirish 
422244961713Sgirish 	/* Reset RXDMA channel */
422344961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
422444961713Sgirish 	if (rs != NPI_SUCCESS) {
422544961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
422644961713Sgirish 			"nxge_rxdma_fatal_err_recover: "
422744961713Sgirish 				" reset rxdma failed (channel %d)", channel));
422844961713Sgirish 		goto fail;
422944961713Sgirish 	}
423044961713Sgirish 
423144961713Sgirish 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
423244961713Sgirish 
423344961713Sgirish 	mboxp = (p_rx_mbox_t)
423444961713Sgirish 	    nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
423544961713Sgirish 
423644961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
423744961713Sgirish 	rbrp->rbr_rd_index = 0;
423844961713Sgirish 
423944961713Sgirish 	rcrp->comp_rd_index = 0;
424044961713Sgirish 	rcrp->comp_wt_index = 0;
424144961713Sgirish 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
424244961713Sgirish 		(p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
424344961713Sgirish 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
424444961713Sgirish 		(p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
424544961713Sgirish 
424644961713Sgirish 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
424744961713Sgirish 		(nxge_port_rcr_size - 1);
424844961713Sgirish 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
424944961713Sgirish 		(nxge_port_rcr_size - 1);
425044961713Sgirish 
425144961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
425244961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
425344961713Sgirish 
425444961713Sgirish 	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
425544961713Sgirish 
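	/*
	 * Walk the buffer ring.  A reference count other than one means
	 * the block is still loaned out: if its usage counters show it
	 * has not been fully consumed, just report it; otherwise mark
	 * it free so it can be re-posted once the channel restarts.
	 */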
425644961713Sgirish 	for (i = 0; i < rbrp->rbr_max_size; i++) {
425744961713Sgirish 		rx_msg_p = rbrp->rx_msg_ring[i];
425844961713Sgirish 		ref_cnt = rx_msg_p->ref_cnt;
425944961713Sgirish 		if (ref_cnt != 1) {
4260a3c5bd6dSspeer 			if (rx_msg_p->cur_usage_cnt !=
4261a3c5bd6dSspeer 					rx_msg_p->max_usage_cnt) {
426244961713Sgirish 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
426344961713Sgirish 						"buf[%d]: cur_usage_cnt = %d "
426444961713Sgirish 						"max_usage_cnt = %d\n", i,
426544961713Sgirish 						rx_msg_p->cur_usage_cnt,
426644961713Sgirish 						rx_msg_p->max_usage_cnt));
4267a3c5bd6dSspeer 			} else {
4268a3c5bd6dSspeer 				/* Buffer can be re-posted */
4269a3c5bd6dSspeer 				rx_msg_p->free = B_TRUE;
4270a3c5bd6dSspeer 				rx_msg_p->cur_usage_cnt = 0;
4271a3c5bd6dSspeer 				rx_msg_p->max_usage_cnt = 0xbaddcafe;
4272a3c5bd6dSspeer 				rx_msg_p->pkt_buf_size = 0;
4273a3c5bd6dSspeer 			}
427444961713Sgirish 		}
427544961713Sgirish 	}
427644961713Sgirish 
427744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
427844961713Sgirish 
427944961713Sgirish 	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
428044961713Sgirish 	if (status != NXGE_OK) {
428144961713Sgirish 		goto fail;
428244961713Sgirish 	}
428344961713Sgirish 
428444961713Sgirish 	MUTEX_EXIT(&rbrp->post_lock);
428544961713Sgirish 	MUTEX_EXIT(&rbrp->lock);
428644961713Sgirish 	MUTEX_EXIT(&rcrp->lock);
428744961713Sgirish 
428844961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
428944961713Sgirish 			"Recovery Successful, RxDMAChannel#%d Restored",
429044961713Sgirish 			channel));
429144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
429244961713Sgirish 
429344961713Sgirish 	return (NXGE_OK);
429444961713Sgirish fail:
429544961713Sgirish 	MUTEX_EXIT(&rbrp->post_lock);
429644961713Sgirish 	MUTEX_EXIT(&rbrp->lock);
429744961713Sgirish 	MUTEX_EXIT(&rcrp->lock);
429844961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
429944961713Sgirish 
430044961713Sgirish 	return (NXGE_ERROR | rs);
430144961713Sgirish }
430244961713Sgirish 
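/*
 * Note on locking in the recovery path above: the RCR lock is taken
 * first, then the RBR lock, then the RBR post lock, and all three are
 * dropped in the reverse order on both the success and failure paths.
 */
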
430344961713Sgirish nxge_status_t
430444961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
430544961713Sgirish {
430644961713Sgirish 	nxge_status_t		status = NXGE_OK;
430744961713Sgirish 	p_nxge_dma_common_t	*dma_buf_p;
430844961713Sgirish 	uint16_t		channel;
430944961713Sgirish 	int			ndmas;
431044961713Sgirish 	int			i;
431144961713Sgirish 
431244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
431344961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
431444961713Sgirish 				"Recovering from RxPort error..."));
431544961713Sgirish 	/* Disable RxMAC */
431644961713Sgirish 
431744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n"));
431844961713Sgirish 	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
431944961713Sgirish 		goto fail;
432044961713Sgirish 
432144961713Sgirish 	NXGE_DELAY(1000);
432244961713Sgirish 
432344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels..."));
432444961713Sgirish 
432544961713Sgirish 	ndmas = nxgep->rx_buf_pool_p->ndmas;
432644961713Sgirish 	dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p;
432744961713Sgirish 
432844961713Sgirish 	for (i = 0; i < ndmas; i++) {
432944961713Sgirish 		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
433044961713Sgirish 		if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) {
433144961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
433244961713Sgirish 					"Could not recover channel %d",
433344961713Sgirish 					channel));
433444961713Sgirish 		}
433544961713Sgirish 	}
433644961713Sgirish 
433744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP..."));
433844961713Sgirish 
433944961713Sgirish 	/* Reset IPP */
434044961713Sgirish 	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
434144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
434244961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
434344961713Sgirish 			"Failed to reset IPP"));
434444961713Sgirish 		goto fail;
434544961713Sgirish 	}
434644961713Sgirish 
434744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
434844961713Sgirish 
434944961713Sgirish 	/* Reset RxMAC */
435044961713Sgirish 	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
435144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
435244961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
435344961713Sgirish 			"Failed to reset RxMAC"));
435444961713Sgirish 		goto fail;
435544961713Sgirish 	}
435644961713Sgirish 
435744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
435844961713Sgirish 
435944961713Sgirish 	/* Re-Initialize IPP */
436044961713Sgirish 	if (nxge_ipp_init(nxgep) != NXGE_OK) {
436144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
436244961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
436344961713Sgirish 			"Failed to init IPP"));
436444961713Sgirish 		goto fail;
436544961713Sgirish 	}
436644961713Sgirish 
436744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
436844961713Sgirish 
436944961713Sgirish 	/* Re-Initialize RxMAC */
437044961713Sgirish 	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
437144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
437244961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
437344961713Sgirish 			"Failed to initialize RxMAC"));
437444961713Sgirish 		goto fail;
437544961713Sgirish 	}
437644961713Sgirish 
437744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
437844961713Sgirish 
437944961713Sgirish 	/* Re-enable RxMAC */
438044961713Sgirish 	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
438144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
438244961713Sgirish 			"nxge_rx_port_fatal_err_recover: "
438344961713Sgirish 			"Failed to enable RxMAC"));
438444961713Sgirish 		goto fail;
438544961713Sgirish 	}
438644961713Sgirish 
438744961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
438844961713Sgirish 			"Recovery Successful, RxPort Restored"));
438944961713Sgirish 
439044961713Sgirish 	return (NXGE_OK);
439144961713Sgirish fail:
439244961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
439344961713Sgirish 	return (status);
439444961713Sgirish }
439544961713Sgirish 
439644961713Sgirish void
439744961713Sgirish nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
439844961713Sgirish {
439944961713Sgirish 	rx_dma_ctl_stat_t	cs;
440044961713Sgirish 	rx_ctl_dat_fifo_stat_t	cdfs;
440144961713Sgirish 
440244961713Sgirish 	switch (err_id) {
440344961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
440444961713Sgirish 	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
440544961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
440644961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
440744961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
440844961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
440944961713Sgirish 	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
441044961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
441144961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCRINCON:
441244961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCRFULL:
441344961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBRFULL:
441444961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
441544961713Sgirish 	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
441644961713Sgirish 	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
441744961713Sgirish 		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
441844961713Sgirish 			chan, &cs.value);
441944961713Sgirish 		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
442044961713Sgirish 			cs.bits.hdw.rcr_ack_err = 1;
442144961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
442244961713Sgirish 			cs.bits.hdw.dc_fifo_err = 1;
442344961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
442444961713Sgirish 			cs.bits.hdw.rcr_sha_par = 1;
442544961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
442644961713Sgirish 			cs.bits.hdw.rbr_pre_par = 1;
442744961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
442844961713Sgirish 			cs.bits.hdw.rbr_tmout = 1;
442944961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
443044961713Sgirish 			cs.bits.hdw.rsp_cnt_err = 1;
443144961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
443244961713Sgirish 			cs.bits.hdw.byte_en_bus = 1;
443344961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
443444961713Sgirish 			cs.bits.hdw.rsp_dat_err = 1;
443544961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
443644961713Sgirish 			cs.bits.hdw.config_err = 1;
443744961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
443844961713Sgirish 			cs.bits.hdw.rcrincon = 1;
443944961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
444044961713Sgirish 			cs.bits.hdw.rcrfull = 1;
444144961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
444244961713Sgirish 			cs.bits.hdw.rbrfull = 1;
444344961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
444444961713Sgirish 			cs.bits.hdw.rbrlogpage = 1;
444544961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
444644961713Sgirish 			cs.bits.hdw.cfiglogpage = 1;
444744961713Sgirish 		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
444844961713Sgirish 				cs.value);
444944961713Sgirish 		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
445044961713Sgirish 			chan, cs.value);
445144961713Sgirish 		break;
445244961713Sgirish 	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
445344961713Sgirish 	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
445444961713Sgirish 	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
445544961713Sgirish 		cdfs.value = 0;
445644961713Sgirish 		if (err_id ==  NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
445744961713Sgirish 			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
445844961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
445944961713Sgirish 			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
446044961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
446144961713Sgirish 			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
446244961713Sgirish 		cmn_err(CE_NOTE,
446344961713Sgirish 			"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
446444961713Sgirish 			cdfs.value);
446544961713Sgirish 		RXDMA_REG_WRITE64(nxgep->npi_handle,
446644961713Sgirish 			RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value);
446744961713Sgirish 		break;
446844961713Sgirish 	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
446944961713Sgirish 		break;
447044961713Sgirish 	case NXGE_FM_EREPORT_RDMC_COMPLETION_ERR:
447144961713Sgirish 		break;
447244961713Sgirish 	}
447344961713Sgirish }
447414ea4bb7Ssd 
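/*
 * Example of how the error-injection helper above might be exercised
 * (illustrative only; the driver's actual injection entry point is not
 * part of this file):
 *
 *	nxge_rxdma_inject_err(nxgep, NXGE_FM_EREPORT_RDMC_RCRFULL, 0);
 *
 * reads RX_DMA_CTL_STAT_DBG_REG for channel 0, sets the rcrfull bit and
 * writes it back, so the normal RDMC error handling and the recovery
 * routines above can be tested without a real hardware fault.
 */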
447514ea4bb7Ssd 
447614ea4bb7Ssd static uint16_t
447714ea4bb7Ssd nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb)
447814ea4bb7Ssd {
447914ea4bb7Ssd 	uint16_t sz = RBR_BKSIZE_8K_BYTES;
448014ea4bb7Ssd 
448114ea4bb7Ssd 	switch (bufsz_type) {
448214ea4bb7Ssd 	case RCR_PKTBUFSZ_0:
448314ea4bb7Ssd 		switch (rbr_cfgb.bits.ldw.bufsz0) {
448414ea4bb7Ssd 		case RBR_BUFSZ0_256B:
448514ea4bb7Ssd 			sz = RBR_BUFSZ0_256_BYTES;
448614ea4bb7Ssd 			break;
448714ea4bb7Ssd 		case RBR_BUFSZ0_512B:
448814ea4bb7Ssd 			sz = RBR_BUFSZ0_512B_BYTES;
448914ea4bb7Ssd 			break;
449014ea4bb7Ssd 		case RBR_BUFSZ0_1K:
449114ea4bb7Ssd 			sz = RBR_BUFSZ0_1K_BYTES;
449214ea4bb7Ssd 			break;
449314ea4bb7Ssd 		case RBR_BUFSZ0_2K:
449414ea4bb7Ssd 			sz = RBR_BUFSZ0_2K_BYTES;
449514ea4bb7Ssd 			break;
449614ea4bb7Ssd 		default:
449714ea4bb7Ssd 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
449814ea4bb7Ssd 			"nxge_get_pktbuf_size: bad bufsz0"));
449914ea4bb7Ssd 			break;
450014ea4bb7Ssd 		}
450114ea4bb7Ssd 		break;
450214ea4bb7Ssd 	case RCR_PKTBUFSZ_1:
450314ea4bb7Ssd 		switch (rbr_cfgb.bits.ldw.bufsz1) {
450414ea4bb7Ssd 		case RBR_BUFSZ1_1K:
450514ea4bb7Ssd 			sz = RBR_BUFSZ1_1K_BYTES;
450614ea4bb7Ssd 			break;
450714ea4bb7Ssd 		case RBR_BUFSZ1_2K:
450814ea4bb7Ssd 			sz = RBR_BUFSZ1_2K_BYTES;
450914ea4bb7Ssd 			break;
451014ea4bb7Ssd 		case RBR_BUFSZ1_4K:
451114ea4bb7Ssd 			sz = RBR_BUFSZ1_4K_BYTES;
451214ea4bb7Ssd 			break;
451314ea4bb7Ssd 		case RBR_BUFSZ1_8K:
451414ea4bb7Ssd 			sz = RBR_BUFSZ1_8K_BYTES;
451514ea4bb7Ssd 			break;
451614ea4bb7Ssd 		default:
451714ea4bb7Ssd 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
451814ea4bb7Ssd 			"nxge_get_pktbuf_size: bad bufsz1"));
451914ea4bb7Ssd 			break;
452014ea4bb7Ssd 		}
452114ea4bb7Ssd 		break;
452214ea4bb7Ssd 	case RCR_PKTBUFSZ_2:
452314ea4bb7Ssd 		switch (rbr_cfgb.bits.ldw.bufsz2) {
452414ea4bb7Ssd 		case RBR_BUFSZ2_2K:
452514ea4bb7Ssd 			sz = RBR_BUFSZ2_2K_BYTES;
452614ea4bb7Ssd 			break;
452714ea4bb7Ssd 		case RBR_BUFSZ2_4K:
452814ea4bb7Ssd 			sz = RBR_BUFSZ2_4K_BYTES;
452914ea4bb7Ssd 			break;
453014ea4bb7Ssd 		case RBR_BUFSZ2_8K:
453114ea4bb7Ssd 			sz = RBR_BUFSZ2_8K_BYTES;
453214ea4bb7Ssd 			break;
453314ea4bb7Ssd 		case RBR_BUFSZ2_16K:
453414ea4bb7Ssd 			sz = RBR_BUFSZ2_16K_BYTES;
453514ea4bb7Ssd 			break;
453614ea4bb7Ssd 		default:
453714ea4bb7Ssd 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
453814ea4bb7Ssd 			"nxge_get_pktbuf_size: bad bufsz2"));
453914ea4bb7Ssd 			break;
454014ea4bb7Ssd 		}
454114ea4bb7Ssd 		break;
454214ea4bb7Ssd 	case RCR_SINGLE_BLOCK:
454314ea4bb7Ssd 		switch (rbr_cfgb.bits.ldw.bksize) {
454414ea4bb7Ssd 		case BKSIZE_4K:
454514ea4bb7Ssd 			sz = RBR_BKSIZE_4K_BYTES;
454614ea4bb7Ssd 			break;
454714ea4bb7Ssd 		case BKSIZE_8K:
454814ea4bb7Ssd 			sz = RBR_BKSIZE_8K_BYTES;
454914ea4bb7Ssd 			break;
455014ea4bb7Ssd 		case BKSIZE_16K:
455114ea4bb7Ssd 			sz = RBR_BKSIZE_16K_BYTES;
455214ea4bb7Ssd 			break;
455314ea4bb7Ssd 		case BKSIZE_32K:
455414ea4bb7Ssd 			sz = RBR_BKSIZE_32K_BYTES;
455514ea4bb7Ssd 			break;
455614ea4bb7Ssd 		default:
455714ea4bb7Ssd 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
455814ea4bb7Ssd 			"nxge_get_pktbuf_size: bad bksize"));
455914ea4bb7Ssd 			break;
456014ea4bb7Ssd 		}
456114ea4bb7Ssd 		break;
456214ea4bb7Ssd 	default:
456314ea4bb7Ssd 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
456414ea4bb7Ssd 		"nxge_get_pktbuf_size: bad bufsz_type"));
456514ea4bb7Ssd 		break;
456614ea4bb7Ssd 	}
456714ea4bb7Ssd 	return (sz);
456814ea4bb7Ssd }
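
/*
 * Example of the decode above (the result depends on how RBR_CFIG_B was
 * programmed): with bufsz0 set to RBR_BUFSZ0_2K, a completion entry
 * tagged RCR_PKTBUFSZ_0 decodes to RBR_BUFSZ0_2K_BYTES, while an entry
 * tagged RCR_SINGLE_BLOCK decodes to the ring's full block size
 * (e.g. RBR_BKSIZE_8K_BYTES when bksize is BKSIZE_8K).
 */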
4569