144961713Sgirish /*
244961713Sgirish  * CDDL HEADER START
344961713Sgirish  *
444961713Sgirish  * The contents of this file are subject to the terms of the
544961713Sgirish  * Common Development and Distribution License (the "License").
644961713Sgirish  * You may not use this file except in compliance with the License.
744961713Sgirish  *
844961713Sgirish  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
944961713Sgirish  * or http://www.opensolaris.org/os/licensing.
1044961713Sgirish  * See the License for the specific language governing permissions
1144961713Sgirish  * and limitations under the License.
1244961713Sgirish  *
1344961713Sgirish  * When distributing Covered Code, include this CDDL HEADER in each
1444961713Sgirish  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1544961713Sgirish  * If applicable, add the following below this CDDL HEADER, with the
1644961713Sgirish  * fields enclosed by brackets "[]" replaced with your own identifying
1744961713Sgirish  * information: Portions Copyright [yyyy] [name of copyright owner]
1844961713Sgirish  *
1944961713Sgirish  * CDDL HEADER END
2044961713Sgirish  */
21ef523517SMichael Speer 
2244961713Sgirish /*
230dc2366fSVenugopal Iyer  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
2444961713Sgirish  * Use is subject to license terms.
2544961713Sgirish  */
2644961713Sgirish 
2744961713Sgirish #include <sys/nxge/nxge_impl.h>
2844961713Sgirish #include <sys/nxge/nxge_rxdma.h>
29678453a8Sspeer #include <sys/nxge/nxge_hio.h>
30678453a8Sspeer 
31678453a8Sspeer #if !defined(_BIG_ENDIAN)
32678453a8Sspeer #include <npi_rx_rd32.h>
33678453a8Sspeer #endif
34678453a8Sspeer #include <npi_rx_rd64.h>
35678453a8Sspeer #include <npi_rx_wr64.h>
3644961713Sgirish 
3744961713Sgirish #define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
38678453a8Sspeer 	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
3944961713Sgirish #define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
4044961713Sgirish 	(rdc + nxgep->pt_config.hw_config.start_rdc)
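
/*
 * Illustrative example only (values are hypothetical, not taken from any
 * real configuration): if the port's partition configuration has
 * def_mac_rxdma_grpid == 4 and start_rdc == 8, then
 * NXGE_ACTUAL_RDCGRP(nxgep, 1) == 5 and NXGE_ACTUAL_RDC(nxgep, 2) == 10.
 * The macros simply rebase a logical group/channel index onto the range
 * of hardware resources this port actually owns.
 */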
4144961713Sgirish 
4244961713Sgirish /*
4344961713Sgirish  * Globals: tunable parameters (/etc/system or adb)
4444961713Sgirish  *
4544961713Sgirish  */
4644961713Sgirish extern uint32_t nxge_rbr_size;
4744961713Sgirish extern uint32_t nxge_rcr_size;
4844961713Sgirish extern uint32_t	nxge_rbr_spare_size;
494df55fdeSJanie Lu extern uint16_t	nxge_rdc_buf_offset;
5044961713Sgirish 
5144961713Sgirish extern uint32_t nxge_mblks_pending;
5244961713Sgirish 
5344961713Sgirish /*
5444961713Sgirish  * Tunable to reduce the amount of time spent in the
5544961713Sgirish  * ISR doing Rx Processing.
5644961713Sgirish  */
5744961713Sgirish extern uint32_t nxge_max_rx_pkts;
5844961713Sgirish 
5944961713Sgirish /*
6044961713Sgirish  * Tunables to manage the receive buffer blocks.
6144961713Sgirish  *
6244961713Sgirish  * nxge_rx_threshold_hi: copy all buffers.
6344961713Sgirish  * nxge_rx_buf_size_type: receive buffer block size type.
6444961713Sgirish  * nxge_rx_threshold_lo: copy only up to tunable block size type.
6544961713Sgirish  */
6644961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
6744961713Sgirish extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
6844961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
6944961713Sgirish 
70b4d05839Sml extern uint32_t	nxge_cksum_offload;
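
/*
 * Example only (not from this file): these externs are defined elsewhere
 * in the driver and are normally tuned from /etc/system, e.g.
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_max_rx_pkts = 512
 *
 * The values shown are hypothetical; see the definitions of these
 * variables for the actual defaults.
 */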
71678453a8Sspeer 
72678453a8Sspeer static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
73678453a8Sspeer static void nxge_unmap_rxdma(p_nxge_t, int);
7444961713Sgirish 
7544961713Sgirish static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
7644961713Sgirish 
77678453a8Sspeer static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
78678453a8Sspeer static void nxge_rxdma_hw_stop(p_nxge_t, int);
7944961713Sgirish 
8044961713Sgirish static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
8144961713Sgirish     p_nxge_dma_common_t *,  p_rx_rbr_ring_t *,
8244961713Sgirish     uint32_t,
8344961713Sgirish     p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
8444961713Sgirish     p_rx_mbox_t *);
8544961713Sgirish static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
8644961713Sgirish     p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
8744961713Sgirish 
8844961713Sgirish static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
8944961713Sgirish     uint16_t,
9044961713Sgirish     p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
9144961713Sgirish     p_rx_rcr_ring_t *, p_rx_mbox_t *);
9244961713Sgirish static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
9344961713Sgirish     p_rx_rcr_ring_t, p_rx_mbox_t);
9444961713Sgirish 
9544961713Sgirish static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
9644961713Sgirish     uint16_t,
9744961713Sgirish     p_nxge_dma_common_t *,
9844961713Sgirish     p_rx_rbr_ring_t *, uint32_t);
9944961713Sgirish static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
10044961713Sgirish     p_rx_rbr_ring_t);
10144961713Sgirish 
10244961713Sgirish static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
10344961713Sgirish     p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
10444961713Sgirish static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
10544961713Sgirish 
106678453a8Sspeer static mblk_t *
107678453a8Sspeer nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
10844961713Sgirish 
10944961713Sgirish static void nxge_receive_packet(p_nxge_t,
11044961713Sgirish 	p_rx_rcr_ring_t,
11144961713Sgirish 	p_rcr_entry_t,
11244961713Sgirish 	boolean_t *,
11344961713Sgirish 	mblk_t **, mblk_t **);
11444961713Sgirish 
11544961713Sgirish nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
11644961713Sgirish 
11744961713Sgirish static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
11844961713Sgirish static void nxge_freeb(p_rx_msg_t);
119678453a8Sspeer static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
12044961713Sgirish 
12144961713Sgirish static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
12244961713Sgirish 				uint32_t, uint32_t);
12344961713Sgirish 
12444961713Sgirish static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
12544961713Sgirish     p_rx_rbr_ring_t);
12644961713Sgirish 
12744961713Sgirish 
12844961713Sgirish static nxge_status_t
12944961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
13044961713Sgirish 
13144961713Sgirish nxge_status_t
13244961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t);
13344961713Sgirish 
134678453a8Sspeer static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
135678453a8Sspeer 
13644961713Sgirish nxge_status_t
13744961713Sgirish nxge_init_rxdma_channels(p_nxge_t nxgep)
13844961713Sgirish {
139e11f0814SMichael Speer 	nxge_grp_set_t	*set = &nxgep->rx_set;
140da14cebeSEric Cheng 	int		i, count, channel;
141e11f0814SMichael Speer 	nxge_grp_t	*group;
142da14cebeSEric Cheng 	dc_map_t	map;
143da14cebeSEric Cheng 	int		dev_gindex;
14444961713Sgirish 
14544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
14644961713Sgirish 
147678453a8Sspeer 	if (!isLDOMguest(nxgep)) {
148678453a8Sspeer 		if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
149678453a8Sspeer 			cmn_err(CE_NOTE, "hw_start_common");
150678453a8Sspeer 			return (NXGE_ERROR);
151678453a8Sspeer 		}
152678453a8Sspeer 	}
153678453a8Sspeer 
154678453a8Sspeer 	/*
155678453a8Sspeer 	 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
156678453a8Sspeer 	 * We only have 8 hardware RDC tables, but we may have
157678453a8Sspeer 	 * up to 16 logical (software-defined) groups of RDCs,
158678453a8Sspeer 	 * if we make use of layer 3 & 4 hardware classification.
159678453a8Sspeer 	 */
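	/*
	 * For example (hypothetical map): with set->lg.map == 0x5, only
	 * logical groups 0 and 2 are populated, and their hardware RDC
	 * tables are dev_gindex = def_mac_rxdma_grpid + 0 and + 2,
	 * respectively.
	 */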
160678453a8Sspeer 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
161678453a8Sspeer 		if ((1 << i) & set->lg.map) {
162e11f0814SMichael Speer 			group = set->group[i];
163da14cebeSEric Cheng 			dev_gindex =
164da14cebeSEric Cheng 			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
165da14cebeSEric Cheng 			map = nxgep->pt_config.rdc_grps[dev_gindex].map;
166678453a8Sspeer 			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
167da14cebeSEric Cheng 				if ((1 << channel) & map) {
168678453a8Sspeer 					if ((nxge_grp_dc_add(nxgep,
1696920a987SMisaki Miyashita 					    group, VP_BOUND_RX, channel)))
170e11f0814SMichael Speer 						goto init_rxdma_channels_exit;
171678453a8Sspeer 				}
172678453a8Sspeer 			}
173678453a8Sspeer 		}
174678453a8Sspeer 		if (++count == set->lg.count)
175678453a8Sspeer 			break;
17644961713Sgirish 	}
17744961713Sgirish 
178678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
179678453a8Sspeer 	return (NXGE_OK);
180e11f0814SMichael Speer 
181e11f0814SMichael Speer init_rxdma_channels_exit:
182e11f0814SMichael Speer 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
183e11f0814SMichael Speer 		if ((1 << i) & set->lg.map) {
184e11f0814SMichael Speer 			group = set->group[i];
185da14cebeSEric Cheng 			dev_gindex =
186da14cebeSEric Cheng 			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
187da14cebeSEric Cheng 			map = nxgep->pt_config.rdc_grps[dev_gindex].map;
188da14cebeSEric Cheng 			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
189da14cebeSEric Cheng 				if ((1 << channel) & map) {
190e11f0814SMichael Speer 					nxge_grp_dc_remove(nxgep,
191da14cebeSEric Cheng 					    VP_BOUND_RX, channel);
192e11f0814SMichael Speer 				}
193e11f0814SMichael Speer 			}
194e11f0814SMichael Speer 		}
195e11f0814SMichael Speer 		if (++count == set->lg.count)
196e11f0814SMichael Speer 			break;
197e11f0814SMichael Speer 	}
198e11f0814SMichael Speer 
199e11f0814SMichael Speer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
200e11f0814SMichael Speer 	return (NXGE_ERROR);
201678453a8Sspeer }
202678453a8Sspeer 
203678453a8Sspeer nxge_status_t
204678453a8Sspeer nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
205678453a8Sspeer {
20608ac1c49SNicolas Droux 	nxge_status_t	status;
207678453a8Sspeer 
208678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));
209678453a8Sspeer 
210678453a8Sspeer 	status = nxge_map_rxdma(nxge, channel);
21144961713Sgirish 	if (status != NXGE_OK) {
212678453a8Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
213678453a8Sspeer 		    "<== nxge_init_rxdma: status 0x%x", status));
214678453a8Sspeer 		return (status);
21544961713Sgirish 	}
21644961713Sgirish 
21708ac1c49SNicolas Droux #if defined(sun4v)
21808ac1c49SNicolas Droux 	if (isLDOMguest(nxge)) {
21908ac1c49SNicolas Droux 		/* set rcr_ring */
22008ac1c49SNicolas Droux 		p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];
22108ac1c49SNicolas Droux 
22208ac1c49SNicolas Droux 		status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
22308ac1c49SNicolas Droux 		if (status != NXGE_OK) {
22408ac1c49SNicolas Droux 			nxge_unmap_rxdma(nxge, channel);
22508ac1c49SNicolas Droux 			return (status);
22608ac1c49SNicolas Droux 		}
22708ac1c49SNicolas Droux 	}
22808ac1c49SNicolas Droux #endif
22908ac1c49SNicolas Droux 
230678453a8Sspeer 	status = nxge_rxdma_hw_start(nxge, channel);
23144961713Sgirish 	if (status != NXGE_OK) {
232678453a8Sspeer 		nxge_unmap_rxdma(nxge, channel);
23344961713Sgirish 	}
23444961713Sgirish 
235678453a8Sspeer 	if (!nxge->statsp->rdc_ksp[channel])
236678453a8Sspeer 		nxge_setup_rdc_kstats(nxge, channel);
237678453a8Sspeer 
238678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, MEM2_CTL,
239678453a8Sspeer 	    "<== nxge_init_rxdma_channel: status 0x%x", status));
24044961713Sgirish 
24144961713Sgirish 	return (status);
24244961713Sgirish }
24344961713Sgirish 
24444961713Sgirish void
24544961713Sgirish nxge_uninit_rxdma_channels(p_nxge_t nxgep)
24644961713Sgirish {
247678453a8Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
248678453a8Sspeer 	int rdc;
249678453a8Sspeer 
25044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
25144961713Sgirish 
252678453a8Sspeer 	if (set->owned.map == 0) {
253678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
254678453a8Sspeer 		    "nxge_uninit_rxdma_channels: no channels"));
255678453a8Sspeer 		return;
256678453a8Sspeer 	}
25744961713Sgirish 
258678453a8Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
259678453a8Sspeer 		if ((1 << rdc) & set->owned.map) {
260678453a8Sspeer 			nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
261678453a8Sspeer 		}
262678453a8Sspeer 	}
263678453a8Sspeer 
264678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
265678453a8Sspeer }
266678453a8Sspeer 
267678453a8Sspeer void
268678453a8Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
269678453a8Sspeer {
270678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
271678453a8Sspeer 
272678453a8Sspeer 	if (nxgep->statsp->rdc_ksp[channel]) {
273678453a8Sspeer 		kstat_delete(nxgep->statsp->rdc_ksp[channel]);
274678453a8Sspeer 		nxgep->statsp->rdc_ksp[channel] = 0;
275678453a8Sspeer 	}
276678453a8Sspeer 
277678453a8Sspeer 	nxge_rxdma_hw_stop(nxgep, channel);
278678453a8Sspeer 	nxge_unmap_rxdma(nxgep, channel);
279678453a8Sspeer 
280678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
28144961713Sgirish }
28244961713Sgirish 
28344961713Sgirish nxge_status_t
28444961713Sgirish nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
28544961713Sgirish {
28644961713Sgirish 	npi_handle_t		handle;
28744961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
28844961713Sgirish 	nxge_status_t		status = NXGE_OK;
28944961713Sgirish 
290330cd344SMichael Speer 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
29144961713Sgirish 
29244961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
29344961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
29444961713Sgirish 
29544961713Sgirish 	if (rs != NPI_SUCCESS) {
29644961713Sgirish 		status = NXGE_ERROR | rs;
29744961713Sgirish 	}
29844961713Sgirish 
299330cd344SMichael Speer 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
300330cd344SMichael Speer 
30144961713Sgirish 	return (status);
30244961713Sgirish }
30344961713Sgirish 
30444961713Sgirish void
30544961713Sgirish nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
30644961713Sgirish {
307678453a8Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
308678453a8Sspeer 	int rdc;
30944961713Sgirish 
31044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
31144961713Sgirish 
312678453a8Sspeer 	if (!isLDOMguest(nxgep)) {
313678453a8Sspeer 		npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
314678453a8Sspeer 		(void) npi_rxdma_dump_fzc_regs(handle);
31544961713Sgirish 	}
316678453a8Sspeer 
317678453a8Sspeer 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
318678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
319678453a8Sspeer 		    "nxge_rxdma_regs_dump_channels: "
320678453a8Sspeer 		    "NULL ring pointer(s)"));
32144961713Sgirish 		return;
32244961713Sgirish 	}
32344961713Sgirish 
324678453a8Sspeer 	if (set->owned.map == 0) {
32544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
326678453a8Sspeer 		    "nxge_rxdma_regs_dump_channels: no channels"));
32744961713Sgirish 		return;
32844961713Sgirish 	}
32944961713Sgirish 
330678453a8Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
331678453a8Sspeer 		if ((1 << rdc) & set->owned.map) {
332678453a8Sspeer 			rx_rbr_ring_t *ring =
333678453a8Sspeer 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
334678453a8Sspeer 			if (ring) {
335678453a8Sspeer 				(void) nxge_dump_rxdma_channel(nxgep, rdc);
336678453a8Sspeer 			}
33744961713Sgirish 		}
33844961713Sgirish 	}
33944961713Sgirish 
34044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump_channels"));
34144961713Sgirish }
34244961713Sgirish 
34344961713Sgirish nxge_status_t
34444961713Sgirish nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
34544961713Sgirish {
34644961713Sgirish 	npi_handle_t		handle;
34744961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
34844961713Sgirish 	nxge_status_t		status = NXGE_OK;
34944961713Sgirish 
35044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
35144961713Sgirish 
35244961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
35344961713Sgirish 	rs = npi_rxdma_dump_rdc_regs(handle, channel);
35444961713Sgirish 
35544961713Sgirish 	if (rs != NPI_SUCCESS) {
35644961713Sgirish 		status = NXGE_ERROR | rs;
35744961713Sgirish 	}
35844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
35944961713Sgirish 	return (status);
36044961713Sgirish }
36144961713Sgirish 
36244961713Sgirish nxge_status_t
36344961713Sgirish nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
36444961713Sgirish     p_rx_dma_ent_msk_t mask_p)
36544961713Sgirish {
36644961713Sgirish 	npi_handle_t		handle;
36744961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
36844961713Sgirish 	nxge_status_t		status = NXGE_OK;
36944961713Sgirish 
37044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
37152ccf843Smisaki 	    "<== nxge_init_rxdma_channel_event_mask"));
37244961713Sgirish 
37344961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
37444961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
37544961713Sgirish 	if (rs != NPI_SUCCESS) {
37644961713Sgirish 		status = NXGE_ERROR | rs;
37744961713Sgirish 	}
37844961713Sgirish 
37944961713Sgirish 	return (status);
38044961713Sgirish }
38144961713Sgirish 
38244961713Sgirish nxge_status_t
38344961713Sgirish nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
38444961713Sgirish     p_rx_dma_ctl_stat_t cs_p)
38544961713Sgirish {
38644961713Sgirish 	npi_handle_t		handle;
38744961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
38844961713Sgirish 	nxge_status_t		status = NXGE_OK;
38944961713Sgirish 
39044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
39152ccf843Smisaki 	    "<== nxge_init_rxdma_channel_cntl_stat"));
39244961713Sgirish 
39344961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
39444961713Sgirish 	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
39544961713Sgirish 
39644961713Sgirish 	if (rs != NPI_SUCCESS) {
39744961713Sgirish 		status = NXGE_ERROR | rs;
39844961713Sgirish 	}
39944961713Sgirish 
40044961713Sgirish 	return (status);
40144961713Sgirish }
40244961713Sgirish 
403678453a8Sspeer /*
404678453a8Sspeer  * nxge_rxdma_cfg_rdcgrp_default_rdc
405678453a8Sspeer  *
406678453a8Sspeer  *	Set the default RDC for an RDC Group (Table)
407678453a8Sspeer  *
408678453a8Sspeer  * Arguments:
409*86ef0a63SRichard Lowe  *	nxgep	The nxge data structure for this port.
410678453a8Sspeer  *	rdcgrp	The group to modify
411678453a8Sspeer  *	rdc	The new default RDC.
412678453a8Sspeer  *
413678453a8Sspeer  * Notes:
414678453a8Sspeer  *
415678453a8Sspeer  * NPI/NXGE function calls:
416678453a8Sspeer  *	npi_rxdma_cfg_rdc_table_default_rdc()
417678453a8Sspeer  *
418678453a8Sspeer  * Registers accessed:
419678453a8Sspeer  *	RDC_TBL_REG: FZC_ZCP + 0x10000
420678453a8Sspeer  *
421678453a8Sspeer  * Context:
422678453a8Sspeer  *	Service domain
423678453a8Sspeer  */
42444961713Sgirish nxge_status_t
425678453a8Sspeer nxge_rxdma_cfg_rdcgrp_default_rdc(
426678453a8Sspeer 	p_nxge_t nxgep,
427678453a8Sspeer 	uint8_t rdcgrp,
428678453a8Sspeer 	uint8_t rdc)
42944961713Sgirish {
43044961713Sgirish 	npi_handle_t		handle;
43144961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
43244961713Sgirish 	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
43344961713Sgirish 	p_nxge_rdc_grp_t	rdc_grp_p;
43444961713Sgirish 	uint8_t actual_rdcgrp, actual_rdc;
43544961713Sgirish 
43644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
43752ccf843Smisaki 	    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
43844961713Sgirish 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
43944961713Sgirish 
44044961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
44144961713Sgirish 
442678453a8Sspeer 	/*
443678453a8Sspeer 	 * This has to be rewritten.  Do we even allow this anymore?
444678453a8Sspeer 	 */
44544961713Sgirish 	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
446678453a8Sspeer 	RDC_MAP_IN(rdc_grp_p->map, rdc);
447678453a8Sspeer 	rdc_grp_p->def_rdc = rdc;
44844961713Sgirish 
44944961713Sgirish 	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
45044961713Sgirish 	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
45144961713Sgirish 
452678453a8Sspeer 	rs = npi_rxdma_cfg_rdc_table_default_rdc(
45352ccf843Smisaki 	    handle, actual_rdcgrp, actual_rdc);
45444961713Sgirish 
45544961713Sgirish 	if (rs != NPI_SUCCESS) {
45644961713Sgirish 		return (NXGE_ERROR | rs);
45744961713Sgirish 	}
45844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
45952ccf843Smisaki 	    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
46044961713Sgirish 	return (NXGE_OK);
46144961713Sgirish }
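
/*
 * Usage sketch (hypothetical arguments, service domain only): make
 * logical RDC 3 the default channel of logical RDC group 0:
 *
 *	if (nxge_rxdma_cfg_rdcgrp_default_rdc(nxgep, 0, 3) != NXGE_OK)
 *		return (NXGE_ERROR);
 *
 * Both indices are translated to the port's actual hardware group and
 * channel by the NXGE_ACTUAL_RDCGRP/NXGE_ACTUAL_RDC macros above.
 */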
46244961713Sgirish 
46344961713Sgirish nxge_status_t
46444961713Sgirish nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
46544961713Sgirish {
46644961713Sgirish 	npi_handle_t		handle;
46744961713Sgirish 
46844961713Sgirish 	uint8_t actual_rdc;
46944961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
47044961713Sgirish 
47144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
47252ccf843Smisaki 	    " ==> nxge_rxdma_cfg_port_default_rdc"));
47344961713Sgirish 
47444961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
475678453a8Sspeer 	actual_rdc = rdc;	/* XXX Hack! */
47644961713Sgirish 	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
47744961713Sgirish 
47844961713Sgirish 
47944961713Sgirish 	if (rs != NPI_SUCCESS) {
48044961713Sgirish 		return (NXGE_ERROR | rs);
48144961713Sgirish 	}
48244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
48352ccf843Smisaki 	    " <== nxge_rxdma_cfg_port_default_rdc"));
48444961713Sgirish 
48544961713Sgirish 	return (NXGE_OK);
48644961713Sgirish }
48744961713Sgirish 
48844961713Sgirish nxge_status_t
48944961713Sgirish nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
490*86ef0a63SRichard Lowe     uint16_t pkts)
49144961713Sgirish {
49244961713Sgirish 	npi_status_t	rs = NPI_SUCCESS;
49344961713Sgirish 	npi_handle_t	handle;
49444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
49552ccf843Smisaki 	    " ==> nxge_rxdma_cfg_rcr_threshold"));
49644961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
49744961713Sgirish 
49844961713Sgirish 	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
49944961713Sgirish 
50044961713Sgirish 	if (rs != NPI_SUCCESS) {
50144961713Sgirish 		return (NXGE_ERROR | rs);
50244961713Sgirish 	}
50344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
50444961713Sgirish 	return (NXGE_OK);
50544961713Sgirish }
50644961713Sgirish 
50744961713Sgirish nxge_status_t
50844961713Sgirish nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
509*86ef0a63SRichard Lowe     uint16_t tout, uint8_t enable)
51044961713Sgirish {
51144961713Sgirish 	npi_status_t	rs = NPI_SUCCESS;
51244961713Sgirish 	npi_handle_t	handle;
51344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
51444961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
51544961713Sgirish 	if (enable == 0) {
51644961713Sgirish 		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
51744961713Sgirish 	} else {
51844961713Sgirish 		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
51952ccf843Smisaki 		    tout);
52044961713Sgirish 	}
52144961713Sgirish 
52244961713Sgirish 	if (rs != NPI_SUCCESS) {
52344961713Sgirish 		return (NXGE_ERROR | rs);
52444961713Sgirish 	}
52544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
52644961713Sgirish 	return (NXGE_OK);
52744961713Sgirish }
52844961713Sgirish 
52944961713Sgirish nxge_status_t
53044961713Sgirish nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
53144961713Sgirish     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
53244961713Sgirish {
53344961713Sgirish 	npi_handle_t		handle;
534*86ef0a63SRichard Lowe 	rdc_desc_cfg_t		rdc_desc;
53544961713Sgirish 	p_rcrcfig_b_t		cfgb_p;
53644961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
53744961713Sgirish 
53844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
53944961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
54044961713Sgirish 	/*
54144961713Sgirish 	 * Use configuration data composed at init time.
54244961713Sgirish 	 * Write to hardware the receive ring configurations.
54344961713Sgirish 	 */
54444961713Sgirish 	rdc_desc.mbox_enable = 1;
54544961713Sgirish 	rdc_desc.mbox_addr = mbox_p->mbox_addr;
54644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
54752ccf843Smisaki 	    "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
54852ccf843Smisaki 	    mbox_p->mbox_addr, rdc_desc.mbox_addr));
54944961713Sgirish 
55044961713Sgirish 	rdc_desc.rbr_len = rbr_p->rbb_max;
55144961713Sgirish 	rdc_desc.rbr_addr = rbr_p->rbr_addr;
55244961713Sgirish 
55344961713Sgirish 	switch (nxgep->rx_bksize_code) {
55444961713Sgirish 	case RBR_BKSIZE_4K:
55544961713Sgirish 		rdc_desc.page_size = SIZE_4KB;
55644961713Sgirish 		break;
55744961713Sgirish 	case RBR_BKSIZE_8K:
55844961713Sgirish 		rdc_desc.page_size = SIZE_8KB;
55944961713Sgirish 		break;
56044961713Sgirish 	case RBR_BKSIZE_16K:
56144961713Sgirish 		rdc_desc.page_size = SIZE_16KB;
56244961713Sgirish 		break;
56344961713Sgirish 	case RBR_BKSIZE_32K:
56444961713Sgirish 		rdc_desc.page_size = SIZE_32KB;
56544961713Sgirish 		break;
56644961713Sgirish 	}
56744961713Sgirish 
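	/*
	 * Each RBR block can be carved into packet buffers of up to three
	 * sizes (size0/size1/size2); all three are marked valid here, and
	 * the RDC picks one per packet based on the received frame length.
	 */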
56844961713Sgirish 	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
56944961713Sgirish 	rdc_desc.valid0 = 1;
57044961713Sgirish 
57144961713Sgirish 	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
57244961713Sgirish 	rdc_desc.valid1 = 1;
57344961713Sgirish 
57444961713Sgirish 	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
57544961713Sgirish 	rdc_desc.valid2 = 1;
57644961713Sgirish 
57744961713Sgirish 	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
57844961713Sgirish 	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
57944961713Sgirish 
58044961713Sgirish 	rdc_desc.rcr_len = rcr_p->comp_size;
58144961713Sgirish 	rdc_desc.rcr_addr = rcr_p->rcr_addr;
58244961713Sgirish 
58344961713Sgirish 	cfgb_p = &(rcr_p->rcr_cfgb);
58444961713Sgirish 	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
585678453a8Sspeer 	/* For now, disable this timeout in a guest domain. */
586678453a8Sspeer 	if (isLDOMguest(nxgep)) {
587678453a8Sspeer 		rdc_desc.rcr_timeout = 0;
588678453a8Sspeer 		rdc_desc.rcr_timeout_enable = 0;
589678453a8Sspeer 	} else {
590678453a8Sspeer 		rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
591678453a8Sspeer 		rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
592678453a8Sspeer 	}
59344961713Sgirish 
59444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
59552ccf843Smisaki 	    "rbr_len qlen %d pagesize code %d rcr_len %d",
59652ccf843Smisaki 	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
59744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
59852ccf843Smisaki 	    "size 0 %d size 1 %d size 2 %d",
59952ccf843Smisaki 	    rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
60052ccf843Smisaki 	    rbr_p->npi_pkt_buf_size2));
60144961713Sgirish 
6024df55fdeSJanie Lu 	if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
6034df55fdeSJanie Lu 		rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
6044df55fdeSJanie Lu 		    &rdc_desc, B_TRUE);
6054df55fdeSJanie Lu 	else
6064df55fdeSJanie Lu 		rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
6074df55fdeSJanie Lu 		    &rdc_desc, B_FALSE);
60844961713Sgirish 	if (rs != NPI_SUCCESS) {
60944961713Sgirish 		return (NXGE_ERROR | rs);
61044961713Sgirish 	}
61144961713Sgirish 
61244961713Sgirish 	/*
61344961713Sgirish 	 * Enable the timeout and threshold.
61444961713Sgirish 	 */
61544961713Sgirish 	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
61652ccf843Smisaki 	    rdc_desc.rcr_threshold);
61744961713Sgirish 	if (rs != NPI_SUCCESS) {
61844961713Sgirish 		return (NXGE_ERROR | rs);
61944961713Sgirish 	}
62044961713Sgirish 
62144961713Sgirish 	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
62252ccf843Smisaki 	    rdc_desc.rcr_timeout);
62344961713Sgirish 	if (rs != NPI_SUCCESS) {
62444961713Sgirish 		return (NXGE_ERROR | rs);
62544961713Sgirish 	}
62644961713Sgirish 
627e759c33aSMichael Speer 	if (!isLDOMguest(nxgep)) {
628e759c33aSMichael Speer 		/* Enable the DMA */
629e759c33aSMichael Speer 		rs = npi_rxdma_cfg_rdc_enable(handle, channel);
630e759c33aSMichael Speer 		if (rs != NPI_SUCCESS) {
631e759c33aSMichael Speer 			return (NXGE_ERROR | rs);
632e759c33aSMichael Speer 		}
63344961713Sgirish 	}
63444961713Sgirish 
63544961713Sgirish 	/* Kick the DMA engine. */
63644961713Sgirish 	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
637e759c33aSMichael Speer 
638e759c33aSMichael Speer 	if (!isLDOMguest(nxgep)) {
639e759c33aSMichael Speer 		/* Clear the rbr empty bit */
640e759c33aSMichael Speer 		(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
641e759c33aSMichael Speer 	}
64244961713Sgirish 
64344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
64444961713Sgirish 
64544961713Sgirish 	return (NXGE_OK);
64644961713Sgirish }
64744961713Sgirish 
64844961713Sgirish nxge_status_t
64944961713Sgirish nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
65044961713Sgirish {
65144961713Sgirish 	npi_handle_t		handle;
65244961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
65344961713Sgirish 
65444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
65544961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
65644961713Sgirish 
65744961713Sgirish 	/* disable the DMA */
65844961713Sgirish 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
65944961713Sgirish 	if (rs != NPI_SUCCESS) {
66044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
66152ccf843Smisaki 		    "<== nxge_disable_rxdma_channel:failed (0x%x)",
66252ccf843Smisaki 		    rs));
66344961713Sgirish 		return (NXGE_ERROR | rs);
66444961713Sgirish 	}
66544961713Sgirish 
66644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
66744961713Sgirish 	return (NXGE_OK);
66844961713Sgirish }
66944961713Sgirish 
67044961713Sgirish nxge_status_t
67144961713Sgirish nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
67244961713Sgirish {
67344961713Sgirish 	npi_handle_t		handle;
67444961713Sgirish 	nxge_status_t		status = NXGE_OK;
67544961713Sgirish 
67644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
67752ccf843Smisaki 	    "==> nxge_rxdma_channel_rcrflush"));
67844961713Sgirish 
67944961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
68044961713Sgirish 	npi_rxdma_rdc_rcr_flush(handle, channel);
68144961713Sgirish 
68244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
68352ccf843Smisaki 	    "<== nxge_rxdma_channel_rcrflush"));
68444961713Sgirish 	return (status);
68544961713Sgirish 
68644961713Sgirish }
68744961713Sgirish 
68844961713Sgirish #define	MID_INDEX(l, r) ((r + l + 1) >> 1)
68944961713Sgirish 
69044961713Sgirish #define	TO_LEFT -1
69144961713Sgirish #define	TO_RIGHT 1
69244961713Sgirish #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
69344961713Sgirish #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
69444961713Sgirish #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
69544961713Sgirish #define	NO_HINT 0xffffffff
69644961713Sgirish 
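/*
 * In the binary search in nxge_rxbuf_pp_to_vp() below, base_side is set
 * to TO_RIGHT when the packet address is at or above the anchor chunk's
 * base, and end_side is set to TO_LEFT when it is below the chunk's end.
 * Their sum classifies the probe: IN_MIDDLE (0) means the address falls
 * inside the chunk, BOTH_RIGHT (+2) means it lies above the chunk (search
 * the right half), and BOTH_LEFT (-2) means it lies below (search left).
 */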
69744961713Sgirish /*ARGSUSED*/
69844961713Sgirish nxge_status_t
69944961713Sgirish nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
700*86ef0a63SRichard Lowe     uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
701*86ef0a63SRichard Lowe     uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
70244961713Sgirish {
70344961713Sgirish 	int			bufsize;
70444961713Sgirish 	uint64_t		pktbuf_pp;
705*86ef0a63SRichard Lowe 	uint64_t		dvma_addr;
706*86ef0a63SRichard Lowe 	rxring_info_t		*ring_info;
707*86ef0a63SRichard Lowe 	int			base_side, end_side;
708*86ef0a63SRichard Lowe 	int			r_index, l_index, anchor_index;
709*86ef0a63SRichard Lowe 	int			found, search_done;
71044961713Sgirish 	uint32_t offset, chunk_size, block_size, page_size_mask;
71144961713Sgirish 	uint32_t chunk_index, block_index, total_index;
712*86ef0a63SRichard Lowe 	int			max_iterations, iteration;
713*86ef0a63SRichard Lowe 	rxbuf_index_info_t	*bufinfo;
71444961713Sgirish 
71544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
71644961713Sgirish 
71744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
71852ccf843Smisaki 	    "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
71952ccf843Smisaki 	    pkt_buf_addr_pp,
72052ccf843Smisaki 	    pktbufsz_type));
72144961713Sgirish 	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
72244961713Sgirish 
72344961713Sgirish 	switch (pktbufsz_type) {
72444961713Sgirish 	case 0:
72544961713Sgirish 		bufsize = rbr_p->pkt_buf_size0;
72644961713Sgirish 		break;
72744961713Sgirish 	case 1:
72844961713Sgirish 		bufsize = rbr_p->pkt_buf_size1;
72944961713Sgirish 		break;
73044961713Sgirish 	case 2:
73144961713Sgirish 		bufsize = rbr_p->pkt_buf_size2;
73244961713Sgirish 		break;
73344961713Sgirish 	case RCR_SINGLE_BLOCK:
73444961713Sgirish 		bufsize = 0;
73544961713Sgirish 		anchor_index = 0;
73644961713Sgirish 		break;
73744961713Sgirish 	default:
73844961713Sgirish 		return (NXGE_ERROR);
73944961713Sgirish 	}
74044961713Sgirish 
74144961713Sgirish 	if (rbr_p->num_blocks == 1) {
74244961713Sgirish 		anchor_index = 0;
74344961713Sgirish 		ring_info = rbr_p->ring_info;
74444961713Sgirish 		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
74544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
74652ccf843Smisaki 		    "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
74752ccf843Smisaki 		    "buf_pp $%p btype %d anchor_index %d "
74852ccf843Smisaki 		    "bufinfo $%p",
74952ccf843Smisaki 		    pkt_buf_addr_pp,
75052ccf843Smisaki 		    pktbufsz_type,
75152ccf843Smisaki 		    anchor_index,
75252ccf843Smisaki 		    bufinfo));
75344961713Sgirish 
75444961713Sgirish 		goto found_index;
75544961713Sgirish 	}
75644961713Sgirish 
75744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
75852ccf843Smisaki 	    "==> nxge_rxbuf_pp_to_vp: "
75952ccf843Smisaki 	    "buf_pp $%p btype %d  anchor_index %d",
76052ccf843Smisaki 	    pkt_buf_addr_pp,
76152ccf843Smisaki 	    pktbufsz_type,
76252ccf843Smisaki 	    anchor_index));
76344961713Sgirish 
76444961713Sgirish 	ring_info = rbr_p->ring_info;
76544961713Sgirish 	found = B_FALSE;
76644961713Sgirish 	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
76744961713Sgirish 	iteration = 0;
76844961713Sgirish 	max_iterations = ring_info->max_iterations;
76944961713Sgirish 		/*
770a3c5bd6dSspeer 		 * First check if this block has been seen
77144961713Sgirish 		 * recently. This is indicated by a hint which
77244961713Sgirish 		 * is initialized when the first buffer of the block
77344961713Sgirish 		 * is seen. The hint is reset when the last buffer of
77444961713Sgirish 		 * the block has been processed.
77544961713Sgirish 		 * As three block sizes are supported, three hints
77644961713Sgirish 		 * are kept. The idea behind the hints is that once
777*86ef0a63SRichard Lowe 		 * the hardware uses a block for a buffer of that
77844961713Sgirish 		 * size, it will use it exclusively for that size
77944961713Sgirish 		 * and will use it until it is exhausted. It is assumed
78044961713Sgirish 		 * that there would be a single block in use for the same
78144961713Sgirish 		 * buffer size at any given time.
78244961713Sgirish 		 */
78344961713Sgirish 	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
78444961713Sgirish 		anchor_index = ring_info->hint[pktbufsz_type];
78544961713Sgirish 		dvma_addr =  bufinfo[anchor_index].dvma_addr;
78644961713Sgirish 		chunk_size = bufinfo[anchor_index].buf_size;
78744961713Sgirish 		if ((pktbuf_pp >= dvma_addr) &&
78852ccf843Smisaki 		    (pktbuf_pp < (dvma_addr + chunk_size))) {
78944961713Sgirish 			found = B_TRUE;
79044961713Sgirish 				/*
79144961713Sgirish 				 * check if this is the last buffer in the block
79244961713Sgirish 				 * If so, then reset the hint for the size;
79344961713Sgirish 				 */
79444961713Sgirish 
79544961713Sgirish 			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
79644961713Sgirish 				ring_info->hint[pktbufsz_type] = NO_HINT;
79744961713Sgirish 		}
79844961713Sgirish 	}
79944961713Sgirish 
80044961713Sgirish 	if (found == B_FALSE) {
80144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
80252ccf843Smisaki 		    "==> nxge_rxbuf_pp_to_vp: (!found)"
80352ccf843Smisaki 		    "buf_pp $%p btype %d anchor_index %d",
80452ccf843Smisaki 		    pkt_buf_addr_pp,
80552ccf843Smisaki 		    pktbufsz_type,
80652ccf843Smisaki 		    anchor_index));
80744961713Sgirish 
80844961713Sgirish 			/*
80944961713Sgirish 			 * This is the first buffer of the block of this
81044961713Sgirish 			 * size. Need to search the whole information
81144961713Sgirish 			 * array.
81244961713Sgirish 			 * the search algorithm uses a binary tree search
81344961713Sgirish 			 * algorithm. It assumes that the information is
81444961713Sgirish 			 * already sorted with increasing order
815*86ef0a63SRichard Lowe 			 * info[0] < info[1] < info[2]	.... < info[n-1]
81644961713Sgirish 			 * where n is the size of the information array
81744961713Sgirish 			 */
81844961713Sgirish 		r_index = rbr_p->num_blocks - 1;
81944961713Sgirish 		l_index = 0;
82044961713Sgirish 		search_done = B_FALSE;
82144961713Sgirish 		anchor_index = MID_INDEX(r_index, l_index);
82244961713Sgirish 		while (search_done == B_FALSE) {
82344961713Sgirish 			if ((r_index == l_index) ||
82452ccf843Smisaki 			    (iteration >= max_iterations))
82544961713Sgirish 				search_done = B_TRUE;
82644961713Sgirish 			end_side = TO_RIGHT; /* to the right */
82744961713Sgirish 			base_side = TO_LEFT; /* to the left */
82844961713Sgirish 			/* read the DVMA address information and sort it */
82944961713Sgirish 			dvma_addr =  bufinfo[anchor_index].dvma_addr;
83044961713Sgirish 			chunk_size = bufinfo[anchor_index].buf_size;
83144961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
83252ccf843Smisaki 			    "==> nxge_rxbuf_pp_to_vp: (searching)"
83352ccf843Smisaki 			    "buf_pp $%p btype %d "
83452ccf843Smisaki 			    "anchor_index %d chunk_size %d dvmaaddr $%p",
83552ccf843Smisaki 			    pkt_buf_addr_pp,
83652ccf843Smisaki 			    pktbufsz_type,
83752ccf843Smisaki 			    anchor_index,
83852ccf843Smisaki 			    chunk_size,
83952ccf843Smisaki 			    dvma_addr));
84044961713Sgirish 
84144961713Sgirish 			if (pktbuf_pp >= dvma_addr)
84244961713Sgirish 				base_side = TO_RIGHT; /* to the right */
84344961713Sgirish 			if (pktbuf_pp < (dvma_addr + chunk_size))
84444961713Sgirish 				end_side = TO_LEFT; /* to the left */
84544961713Sgirish 
84644961713Sgirish 			switch (base_side + end_side) {
84752ccf843Smisaki 			case IN_MIDDLE:
84852ccf843Smisaki 				/* found */
84952ccf843Smisaki 				found = B_TRUE;
85052ccf843Smisaki 				search_done = B_TRUE;
85152ccf843Smisaki 				if ((pktbuf_pp + bufsize) <
85252ccf843Smisaki 				    (dvma_addr + chunk_size))
85352ccf843Smisaki 					ring_info->hint[pktbufsz_type] =
85452ccf843Smisaki 					    bufinfo[anchor_index].buf_index;
85552ccf843Smisaki 				break;
85652ccf843Smisaki 			case BOTH_RIGHT:
85752ccf843Smisaki 				/* not found: go to the right */
85852ccf843Smisaki 				l_index = anchor_index + 1;
85952ccf843Smisaki 				anchor_index = MID_INDEX(r_index, l_index);
86052ccf843Smisaki 				break;
86152ccf843Smisaki 
86252ccf843Smisaki 			case BOTH_LEFT:
86352ccf843Smisaki 				/* not found: go to the left */
86452ccf843Smisaki 				r_index = anchor_index - 1;
86552ccf843Smisaki 				anchor_index = MID_INDEX(r_index, l_index);
86652ccf843Smisaki 				break;
86752ccf843Smisaki 			default: /* should not come here */
86852ccf843Smisaki 				return (NXGE_ERROR);
86944961713Sgirish 			}
87044961713Sgirish 			iteration++;
87144961713Sgirish 		}
87244961713Sgirish 
87344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
87452ccf843Smisaki 		    "==> nxge_rxbuf_pp_to_vp: (search done)"
87552ccf843Smisaki 		    "buf_pp $%p btype %d anchor_index %d",
87652ccf843Smisaki 		    pkt_buf_addr_pp,
87752ccf843Smisaki 		    pktbufsz_type,
87852ccf843Smisaki 		    anchor_index));
87944961713Sgirish 	}
88044961713Sgirish 
88144961713Sgirish 	if (found == B_FALSE) {
88244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
88352ccf843Smisaki 		    "==> nxge_rxbuf_pp_to_vp: (search failed)"
88452ccf843Smisaki 		    "buf_pp $%p btype %d anchor_index %d",
88552ccf843Smisaki 		    pkt_buf_addr_pp,
88652ccf843Smisaki 		    pktbufsz_type,
88752ccf843Smisaki 		    anchor_index));
88844961713Sgirish 		return (NXGE_ERROR);
88944961713Sgirish 	}
89044961713Sgirish 
89144961713Sgirish found_index:
89244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
89352ccf843Smisaki 	    "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
89452ccf843Smisaki 	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
89552ccf843Smisaki 	    pkt_buf_addr_pp,
89652ccf843Smisaki 	    pktbufsz_type,
89752ccf843Smisaki 	    bufsize,
89852ccf843Smisaki 	    anchor_index));
89944961713Sgirish 
90044961713Sgirish 	/* index of the first block in this chunk */
90144961713Sgirish 	chunk_index = bufinfo[anchor_index].start_index;
90244961713Sgirish 	dvma_addr =  bufinfo[anchor_index].dvma_addr;
90344961713Sgirish 	page_size_mask = ring_info->block_size_mask;
90444961713Sgirish 
90544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
90652ccf843Smisaki 	    "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
90752ccf843Smisaki 	    "buf_pp $%p btype %d bufsize %d "
90852ccf843Smisaki 	    "anchor_index %d chunk_index %d dvma $%p",
90952ccf843Smisaki 	    pkt_buf_addr_pp,
91052ccf843Smisaki 	    pktbufsz_type,
91152ccf843Smisaki 	    bufsize,
91252ccf843Smisaki 	    anchor_index,
91352ccf843Smisaki 	    chunk_index,
91452ccf843Smisaki 	    dvma_addr));
91544961713Sgirish 
91644961713Sgirish 	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
91744961713Sgirish 	block_size = rbr_p->block_size; /* System  block(page) size */
91844961713Sgirish 
91944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
92052ccf843Smisaki 	    "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
92152ccf843Smisaki 	    "buf_pp $%p btype %d bufsize %d "
92252ccf843Smisaki 	    "anchor_index %d chunk_index %d dvma $%p "
92352ccf843Smisaki 	    "offset %d block_size %d",
92452ccf843Smisaki 	    pkt_buf_addr_pp,
92552ccf843Smisaki 	    pktbufsz_type,
92652ccf843Smisaki 	    bufsize,
92752ccf843Smisaki 	    anchor_index,
92852ccf843Smisaki 	    chunk_index,
92952ccf843Smisaki 	    dvma_addr,
93052ccf843Smisaki 	    offset,
93152ccf843Smisaki 	    block_size));
93244961713Sgirish 
93344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
93444961713Sgirish 
93544961713Sgirish 	block_index = (offset / block_size); /* index within chunk */
93644961713Sgirish 	total_index = chunk_index + block_index;
93744961713Sgirish 
93844961713Sgirish 
93944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
94052ccf843Smisaki 	    "==> nxge_rxbuf_pp_to_vp: "
94152ccf843Smisaki 	    "total_index %d dvma_addr $%p "
94252ccf843Smisaki 	    "offset %d block_size %d "
94352ccf843Smisaki 	    "block_index %d ",
94452ccf843Smisaki 	    total_index, dvma_addr,
94552ccf843Smisaki 	    offset, block_size,
94652ccf843Smisaki 	    block_index));
947adfcba55Sjoycey 	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
94852ccf843Smisaki 	    (uint64_t)offset);
94944961713Sgirish 
95044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
95152ccf843Smisaki 	    "==> nxge_rxbuf_pp_to_vp: "
95252ccf843Smisaki 	    "total_index %d dvma_addr $%p "
95352ccf843Smisaki 	    "offset %d block_size %d "
95452ccf843Smisaki 	    "block_index %d "
95552ccf843Smisaki 	    "*pkt_buf_addr_p $%p",
95652ccf843Smisaki 	    total_index, dvma_addr,
95752ccf843Smisaki 	    offset, block_size,
95852ccf843Smisaki 	    block_index,
95952ccf843Smisaki 	    *pkt_buf_addr_p));
96044961713Sgirish 
96144961713Sgirish 
96244961713Sgirish 	*msg_index = total_index;
96344961713Sgirish 	*bufoffset =  (offset & page_size_mask);
96444961713Sgirish 
96544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
96652ccf843Smisaki 	    "==> nxge_rxbuf_pp_to_vp: get msg index: "
96752ccf843Smisaki 	    "msg_index %d bufoffset_index %d",
96852ccf843Smisaki 	    *msg_index,
96952ccf843Smisaki 	    *bufoffset));
97044961713Sgirish 
97144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
97244961713Sgirish 
97344961713Sgirish 	return (NXGE_OK);
97444961713Sgirish }
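
/*
 * Minimal sketch (not driver code) of the translation nxge_rxbuf_pp_to_vp()
 * performs, assuming bufinfo[] has been sorted by dvma_addr in
 * nxge_rxbuf_index_info_init() and ignoring the per-size hints:
 *
 *	for (i = 0; i < rbr_p->num_blocks; i++) {
 *		uint64_t base = bufinfo[i].dvma_addr;
 *		if (pktbuf_pp >= base &&
 *		    pktbuf_pp < base + bufinfo[i].buf_size) {
 *			offset = pktbuf_pp - base;
 *			*pkt_buf_addr_p = (uint64_t *)
 *			    ((uint64_t)bufinfo[i].kaddr + offset);
 *			*msg_index = bufinfo[i].start_index +
 *			    (offset / rbr_p->block_size);
 *			*bufoffset = offset & ring_info->block_size_mask;
 *			return (NXGE_OK);
 *		}
 *	}
 *	return (NXGE_ERROR);
 *
 * The real function replaces this linear scan with a binary search that
 * is bounded by ring_info->max_iterations probes and primed by the
 * per-buffer-size hints described above.
 */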
97544961713Sgirish 
97644961713Sgirish /*
97744961713Sgirish  * used by quick sort (qsort) function
97844961713Sgirish  * to perform comparison
97944961713Sgirish  */
98044961713Sgirish static int
98144961713Sgirish nxge_sort_compare(const void *p1, const void *p2)
98244961713Sgirish {
98344961713Sgirish 
98444961713Sgirish 	rxbuf_index_info_t *a, *b;
98544961713Sgirish 
98644961713Sgirish 	a = (rxbuf_index_info_t *)p1;
98744961713Sgirish 	b = (rxbuf_index_info_t *)p2;
98844961713Sgirish 
98944961713Sgirish 	if (a->dvma_addr > b->dvma_addr)
99044961713Sgirish 		return (1);
99144961713Sgirish 	if (a->dvma_addr < b->dvma_addr)
99244961713Sgirish 		return (-1);
99344961713Sgirish 	return (0);
99444961713Sgirish }
99544961713Sgirish 
99644961713Sgirish 
99744961713Sgirish 
99844961713Sgirish /*
99944961713Sgirish  * grabbed this sort implementation from common/syscall/avl.c
100044961713Sgirish  *
100144961713Sgirish  */
100244961713Sgirish /*
100344961713Sgirish  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
100444961713Sgirish  * v = Ptr to array/vector of objs
100544961713Sgirish  * n = # objs in the array
100644961713Sgirish  * s = size of each obj (must be a multiple of the word size)
100744961713Sgirish  * f = ptr to function to compare two objs
100844961713Sgirish  *	returns (-1 = less than, 0 = equal, 1 = greater than)
100944961713Sgirish  */
101044961713Sgirish void
101144961713Sgirish nxge_ksort(caddr_t v, int n, int s, int (*f)())
101244961713Sgirish {
101344961713Sgirish 	int g, i, j, ii;
101444961713Sgirish 	unsigned int *p1, *p2;
101544961713Sgirish 	unsigned int tmp;
101644961713Sgirish 
101744961713Sgirish 	/* No work to do */
101844961713Sgirish 	if (v == NULL || n <= 1)
101944961713Sgirish 		return;
102044961713Sgirish 	/* Sanity check on arguments */
102144961713Sgirish 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
102244961713Sgirish 	ASSERT(s > 0);
102344961713Sgirish 
102444961713Sgirish 	for (g = n / 2; g > 0; g /= 2) {
102544961713Sgirish 		for (i = g; i < n; i++) {
102644961713Sgirish 			for (j = i - g; j >= 0 &&
102752ccf843Smisaki 			    (*f)(v + j * s, v + (j + g) * s) == 1;
102852ccf843Smisaki 			    j -= g) {
102944961713Sgirish 				p1 = (unsigned *)(v + j * s);
103044961713Sgirish 				p2 = (unsigned *)(v + (j + g) * s);
103144961713Sgirish 				for (ii = 0; ii < s / 4; ii++) {
103244961713Sgirish 					tmp = *p1;
103344961713Sgirish 					*p1++ = *p2;
103444961713Sgirish 					*p2++ = tmp;
103544961713Sgirish 				}
103644961713Sgirish 			}
103744961713Sgirish 		}
103844961713Sgirish 	}
103944961713Sgirish }
104044961713Sgirish 
104144961713Sgirish /*
104244961713Sgirish  * Initialize data structures required for rxdma
104344961713Sgirish  * buffer dvma->vmem address lookup
104444961713Sgirish  */
104544961713Sgirish /*ARGSUSED*/
104644961713Sgirish static nxge_status_t
104744961713Sgirish nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
104844961713Sgirish {
104944961713Sgirish 
105044961713Sgirish 	int index;
105144961713Sgirish 	rxring_info_t *ring_info;
105244961713Sgirish 	int max_iteration = 0, max_index = 0;
105344961713Sgirish 
105444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
105544961713Sgirish 
105644961713Sgirish 	ring_info = rbrp->ring_info;
105744961713Sgirish 	ring_info->hint[0] = NO_HINT;
105844961713Sgirish 	ring_info->hint[1] = NO_HINT;
105944961713Sgirish 	ring_info->hint[2] = NO_HINT;
106044961713Sgirish 	max_index = rbrp->num_blocks;
106144961713Sgirish 
106244961713Sgirish 		/* read the DVMA address information and sort it */
106344961713Sgirish 		/* do init of the information array */
106444961713Sgirish 
106544961713Sgirish 
106644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
106752ccf843Smisaki 	    " nxge_rxbuf_index_info_init Sort ptrs"));
106844961713Sgirish 
106944961713Sgirish 		/* sort the array */
107044961713Sgirish 	nxge_ksort((void *)ring_info->buffer, max_index,
107152ccf843Smisaki 	    sizeof (rxbuf_index_info_t), nxge_sort_compare);
107244961713Sgirish 
107344961713Sgirish 
107444961713Sgirish 
107544961713Sgirish 	for (index = 0; index < max_index; index++) {
107644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
107752ccf843Smisaki 		    " nxge_rxbuf_index_info_init: sorted chunk %d "
107852ccf843Smisaki 		    " ioaddr $%p kaddr $%p size %x",
107952ccf843Smisaki 		    index, ring_info->buffer[index].dvma_addr,
108052ccf843Smisaki 		    ring_info->buffer[index].kaddr,
108152ccf843Smisaki 		    ring_info->buffer[index].buf_size));
108244961713Sgirish 	}
108344961713Sgirish 
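	/*
	 * max_iterations bounds the binary search in nxge_rxbuf_pp_to_vp():
	 * the loop below finds the smallest power of two strictly greater
	 * than the number of chunks and adds one.  For example, with 8
	 * chunks, max_iteration ends up 4 and max_iterations is set to 5.
	 */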
108444961713Sgirish 	max_iteration = 0;
108544961713Sgirish 	while (max_index >= (1ULL << max_iteration))
108644961713Sgirish 		max_iteration++;
108744961713Sgirish 	ring_info->max_iterations = max_iteration + 1;
108844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
108952ccf843Smisaki 	    " nxge_rxbuf_index_info_init Find max iter %d",
109052ccf843Smisaki 	    ring_info->max_iterations));
109144961713Sgirish 
109244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
109344961713Sgirish 	return (NXGE_OK);
109444961713Sgirish }
109544961713Sgirish 
10960a8e077aSspeer /* ARGSUSED */
109744961713Sgirish void
109844961713Sgirish nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
109944961713Sgirish {
110044961713Sgirish #ifdef	NXGE_DEBUG
110144961713Sgirish 
110244961713Sgirish 	uint32_t bptr;
110344961713Sgirish 	uint64_t pp;
110444961713Sgirish 
110544961713Sgirish 	bptr = entry_p->bits.hdw.pkt_buf_addr;
110644961713Sgirish 
110744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
110852ccf843Smisaki 	    "\trcr entry $%p "
110952ccf843Smisaki 	    "\trcr entry 0x%0llx "
111052ccf843Smisaki 	    "\trcr entry 0x%08x "
111152ccf843Smisaki 	    "\trcr entry 0x%08x "
111252ccf843Smisaki 	    "\tvalue 0x%0llx\n"
111352ccf843Smisaki 	    "\tmulti = %d\n"
111452ccf843Smisaki 	    "\tpkt_type = 0x%x\n"
111552ccf843Smisaki 	    "\tzero_copy = %d\n"
111652ccf843Smisaki 	    "\tnoport = %d\n"
111752ccf843Smisaki 	    "\tpromis = %d\n"
111852ccf843Smisaki 	    "\terror = 0x%04x\n"
111952ccf843Smisaki 	    "\tdcf_err = 0x%01x\n"
112052ccf843Smisaki 	    "\tl2_len = %d\n"
112152ccf843Smisaki 	    "\tpktbufsize = %d\n"
112252ccf843Smisaki 	    "\tpkt_buf_addr = $%p\n"
112352ccf843Smisaki 	    "\tpkt_buf_addr (<< 6) = $%p\n",
112452ccf843Smisaki 	    entry_p,
112552ccf843Smisaki 	    *(int64_t *)entry_p,
112652ccf843Smisaki 	    *(int32_t *)entry_p,
112752ccf843Smisaki 	    *(int32_t *)((char *)entry_p + 32),
112852ccf843Smisaki 	    entry_p->value,
112952ccf843Smisaki 	    entry_p->bits.hdw.multi,
113052ccf843Smisaki 	    entry_p->bits.hdw.pkt_type,
113152ccf843Smisaki 	    entry_p->bits.hdw.zero_copy,
113252ccf843Smisaki 	    entry_p->bits.hdw.noport,
113352ccf843Smisaki 	    entry_p->bits.hdw.promis,
113452ccf843Smisaki 	    entry_p->bits.hdw.error,
113552ccf843Smisaki 	    entry_p->bits.hdw.dcf_err,
113652ccf843Smisaki 	    entry_p->bits.hdw.l2_len,
113752ccf843Smisaki 	    entry_p->bits.hdw.pktbufsz,
113852ccf843Smisaki 	    bptr,
113952ccf843Smisaki 	    entry_p->bits.ldw.pkt_buf_addr));
114044961713Sgirish 
114144961713Sgirish 	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
114252ccf843Smisaki 	    RCR_PKT_BUF_ADDR_SHIFT;
114344961713Sgirish 
114444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
114552ccf843Smisaki 	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
114644961713Sgirish #endif
114744961713Sgirish }
114844961713Sgirish 
114944961713Sgirish void
115044961713Sgirish nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
115144961713Sgirish {
115244961713Sgirish 	npi_handle_t		handle;
1153*86ef0a63SRichard Lowe 	rbr_stat_t		rbr_stat;
1154*86ef0a63SRichard Lowe 	addr44_t		hd_addr;
1155*86ef0a63SRichard Lowe 	addr44_t		tail_addr;
1156*86ef0a63SRichard Lowe 	uint16_t		qlen;
115744961713Sgirish 
115844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
115952ccf843Smisaki 	    "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
116044961713Sgirish 
116144961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
116244961713Sgirish 
116344961713Sgirish 	/* RBR head */
116444961713Sgirish 	hd_addr.addr = 0;
116544961713Sgirish 	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
116653f3d8ecSyc 	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
116752ccf843Smisaki 	    (void *)hd_addr.addr);
116844961713Sgirish 
116944961713Sgirish 	/* RBR stats */
117044961713Sgirish 	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
117144961713Sgirish 	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
117244961713Sgirish 
117344961713Sgirish 	/* RCR tail */
117444961713Sgirish 	tail_addr.addr = 0;
117544961713Sgirish 	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
117653f3d8ecSyc 	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
117752ccf843Smisaki 	    (void *)tail_addr.addr);
117844961713Sgirish 
117944961713Sgirish 	/* RCR qlen */
118044961713Sgirish 	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
118144961713Sgirish 	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
118244961713Sgirish 
118344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
118452ccf843Smisaki 	    "<== nxge_rxdma_regs_dump: rdc channel %d", rdc));
118544961713Sgirish }
118644961713Sgirish 
118744961713Sgirish nxge_status_t
118844961713Sgirish nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
118944961713Sgirish {
1190678453a8Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
1191678453a8Sspeer 	nxge_status_t status;
1192678453a8Sspeer 	npi_status_t rs;
1193678453a8Sspeer 	int rdc;
119444961713Sgirish 
119544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
119652ccf843Smisaki 	    "==> nxge_rxdma_hw_mode: mode %d", enable));
119744961713Sgirish 
119844961713Sgirish 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
119944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1200678453a8Sspeer 		    "<== nxge_rxdma_hw_mode: not initialized"));
120144961713Sgirish 		return (NXGE_ERROR);
120244961713Sgirish 	}
120344961713Sgirish 
1204678453a8Sspeer 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1205678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1206678453a8Sspeer 		    "<== nxge_rxdma_hw_mode: "
1207678453a8Sspeer 		    "NULL ring pointer(s)"));
120844961713Sgirish 		return (NXGE_ERROR);
120944961713Sgirish 	}
121044961713Sgirish 
1211678453a8Sspeer 	if (set->owned.map == 0) {
121244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1213678453a8Sspeer 		    "nxge_rxdma_hw_mode: no channels"));
1214b37cc459SToomas Soome 		return (0);
121544961713Sgirish 	}
121644961713Sgirish 
1217e3d11eeeSToomas Soome 	rs = 0;
1218678453a8Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1219678453a8Sspeer 		if ((1 << rdc) & set->owned.map) {
1220678453a8Sspeer 			rx_rbr_ring_t *ring =
1221678453a8Sspeer 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1222678453a8Sspeer 			npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
1223678453a8Sspeer 			if (ring) {
1224678453a8Sspeer 				if (enable) {
1225678453a8Sspeer 					NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1226678453a8Sspeer 					    "==> nxge_rxdma_hw_mode: "
1227678453a8Sspeer 					    "channel %d (enable)", rdc));
1228678453a8Sspeer 					rs = npi_rxdma_cfg_rdc_enable
1229678453a8Sspeer 					    (handle, rdc);
1230678453a8Sspeer 				} else {
1231678453a8Sspeer 					NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1232678453a8Sspeer 					    "==> nxge_rxdma_hw_mode: "
1233678453a8Sspeer 					    "channel %d (disable)", rdc));
1234678453a8Sspeer 					rs = npi_rxdma_cfg_rdc_disable
1235678453a8Sspeer 					    (handle, rdc);
1236678453a8Sspeer 				}
1237678453a8Sspeer 			}
123844961713Sgirish 		}
123944961713Sgirish 	}
124044961713Sgirish 
124144961713Sgirish 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
124244961713Sgirish 
124344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
124452ccf843Smisaki 	    "<== nxge_rxdma_hw_mode: status 0x%x", status));
124544961713Sgirish 
124644961713Sgirish 	return (status);
124744961713Sgirish }
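
/*
 * Illustrative sketch (not driver code): set->owned.map carries one bit per
 * RDC owned by this instance, so the loop in nxge_rxdma_hw_mode() visits
 * exactly the owned channels.  For a hypothetical map of 0x0000000a:
 *
 *	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
 *		if ((1 << rdc) & 0x0000000a) {
 *			... enable or disable rdc ...	(visits rdc 1 and 3)
 *		}
 *	}
 */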
124844961713Sgirish 
124944961713Sgirish void
125044961713Sgirish nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
125144961713Sgirish {
125244961713Sgirish 	npi_handle_t		handle;
125344961713Sgirish 
125444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
125552ccf843Smisaki 	    "==> nxge_rxdma_enable_channel: channel %d", channel));
125644961713Sgirish 
125744961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
125844961713Sgirish 	(void) npi_rxdma_cfg_rdc_enable(handle, channel);
125944961713Sgirish 
126044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
126144961713Sgirish }
126244961713Sgirish 
126344961713Sgirish void
126444961713Sgirish nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
126544961713Sgirish {
126644961713Sgirish 	npi_handle_t		handle;
126744961713Sgirish 
126844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
126952ccf843Smisaki 	    "==> nxge_rxdma_disable_channel: channel %d", channel));
127044961713Sgirish 
127144961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
127244961713Sgirish 	(void) npi_rxdma_cfg_rdc_disable(handle, channel);
127344961713Sgirish 
127444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
127544961713Sgirish }
127644961713Sgirish 
127744961713Sgirish void
127844961713Sgirish nxge_hw_start_rx(p_nxge_t nxgep)
127944961713Sgirish {
128044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
128144961713Sgirish 
128244961713Sgirish 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
128344961713Sgirish 	(void) nxge_rx_mac_enable(nxgep);
128444961713Sgirish 
128544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
128644961713Sgirish }
128744961713Sgirish 
128844961713Sgirish /*ARGSUSED*/
128944961713Sgirish void
129044961713Sgirish nxge_fixup_rxdma_rings(p_nxge_t nxgep)
129144961713Sgirish {
1292678453a8Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
1293678453a8Sspeer 	int rdc;
129444961713Sgirish 
129544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
129644961713Sgirish 
1297678453a8Sspeer 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1298678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1299678453a8Sspeer 		    "<== nxge_fixup_rxdma_rings: "
1300678453a8Sspeer 		    "NULL ring pointer(s)"));
130144961713Sgirish 		return;
130244961713Sgirish 	}
130344961713Sgirish 
1304678453a8Sspeer 	if (set->owned.map == 0) {
130544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1306678453a8Sspeer 		    "<== nxge_fixup_rxdma_rings: no channels"));
130744961713Sgirish 		return;
130844961713Sgirish 	}
130944961713Sgirish 
1310678453a8Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1311678453a8Sspeer 		if ((1 << rdc) & set->owned.map) {
1312678453a8Sspeer 			rx_rbr_ring_t *ring =
1313678453a8Sspeer 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1314678453a8Sspeer 			if (ring) {
1315678453a8Sspeer 				nxge_rxdma_hw_stop(nxgep, rdc);
1316678453a8Sspeer 				NXGE_DEBUG_MSG((nxgep, RX_CTL,
131752ccf843Smisaki 				    "==> nxge_fixup_rxdma_rings: "
131852ccf843Smisaki 				    "channel %d ring $%p",
131952ccf843Smisaki 				    rdc, ring));
13203587e8e2SMichael Speer 				(void) nxge_rxdma_fix_channel(nxgep, rdc);
1321678453a8Sspeer 			}
1322678453a8Sspeer 		}
132344961713Sgirish 	}
132444961713Sgirish 
132544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
132644961713Sgirish }
132744961713Sgirish 
132844961713Sgirish void
132944961713Sgirish nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
133044961713Sgirish {
133144961713Sgirish 	int			ndmas;
1332*86ef0a63SRichard Lowe 	p_rx_rbr_rings_t	rx_rbr_rings;
133344961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
1334*86ef0a63SRichard Lowe 	p_rx_rcr_rings_t	rx_rcr_rings;
133544961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
1336*86ef0a63SRichard Lowe 	p_rx_mbox_areas_t	rx_mbox_areas_p;
133744961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
133844961713Sgirish 	p_nxge_dma_pool_t	dma_buf_poolp;
133944961713Sgirish 	p_nxge_dma_pool_t	dma_cntl_poolp;
1340*86ef0a63SRichard Lowe 	p_rx_rbr_ring_t		rbrp;
1341*86ef0a63SRichard Lowe 	p_rx_rcr_ring_t		rcrp;
1342*86ef0a63SRichard Lowe 	p_rx_mbox_t		mboxp;
1343*86ef0a63SRichard Lowe 	p_nxge_dma_common_t	dmap;
134444961713Sgirish 	nxge_status_t		status = NXGE_OK;
134544961713Sgirish 
13463587e8e2SMichael Speer 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
134744961713Sgirish 
134844961713Sgirish 	(void) nxge_rxdma_stop_channel(nxgep, channel);
134944961713Sgirish 
135044961713Sgirish 	dma_buf_poolp = nxgep->rx_buf_pool_p;
135144961713Sgirish 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
135244961713Sgirish 
135344961713Sgirish 	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
135444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
13553587e8e2SMichael Speer 		    "<== nxge_rxdma_fix_channel: buf not allocated"));
135644961713Sgirish 		return;
135744961713Sgirish 	}
135844961713Sgirish 
135944961713Sgirish 	ndmas = dma_buf_poolp->ndmas;
136044961713Sgirish 	if (!ndmas) {
136144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
13623587e8e2SMichael Speer 		    "<== nxge_rxdma_fix_channel: no dma allocated"));
136344961713Sgirish 		return;
136444961713Sgirish 	}
136544961713Sgirish 
1366a3c5bd6dSspeer 	rx_rbr_rings = nxgep->rx_rbr_rings;
136744961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
136844961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
136944961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
137044961713Sgirish 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
137144961713Sgirish 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
137244961713Sgirish 
137344961713Sgirish 	/* Reinitialize the receive block and completion rings */
13743587e8e2SMichael Speer 	rbrp = (p_rx_rbr_ring_t)rbr_rings[channel];
13753587e8e2SMichael Speer 	rcrp = (p_rx_rcr_ring_t)rcr_rings[channel];
13763587e8e2SMichael Speer 	mboxp = (p_rx_mbox_t)rx_mbox_p[channel];
137744961713Sgirish 
137844961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
137944961713Sgirish 	rbrp->rbr_rd_index = 0;
138044961713Sgirish 	rcrp->comp_rd_index = 0;
138144961713Sgirish 	rcrp->comp_wt_index = 0;
138244961713Sgirish 
138344961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
138444961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
138544961713Sgirish 
138644961713Sgirish 	status = nxge_rxdma_start_channel(nxgep, channel,
138752ccf843Smisaki 	    rbrp, rcrp, mboxp);
138844961713Sgirish 	if (status != NXGE_OK) {
13893587e8e2SMichael Speer 		goto nxge_rxdma_fix_channel_fail;
1390da14cebeSEric Cheng 	}
1391da14cebeSEric Cheng 
1392da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
13933587e8e2SMichael Speer 	    "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
13943587e8e2SMichael Speer 	return;
1395da14cebeSEric Cheng 
13963587e8e2SMichael Speer nxge_rxdma_fix_channel_fail:
1397da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
13983587e8e2SMichael Speer 	    "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
139944961713Sgirish }
140044961713Sgirish 
140144961713Sgirish p_rx_rbr_ring_t
140244961713Sgirish nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
140344961713Sgirish {
1404678453a8Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
1405678453a8Sspeer 	nxge_channel_t rdc;
140644961713Sgirish 
140744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
140852ccf843Smisaki 	    "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
140944961713Sgirish 
1410678453a8Sspeer 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1411678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1412678453a8Sspeer 		    "<== nxge_rxdma_get_rbr_ring: "
1413678453a8Sspeer 		    "NULL ring pointer(s)"));
141444961713Sgirish 		return (NULL);
141544961713Sgirish 	}
1416678453a8Sspeer 
1417678453a8Sspeer 	if (set->owned.map == 0) {
141844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1419678453a8Sspeer 		    "<== nxge_rxdma_get_rbr_ring: no channels"));
142044961713Sgirish 		return (NULL);
142144961713Sgirish 	}
142244961713Sgirish 
1423678453a8Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1424678453a8Sspeer 		if ((1 << rdc) & set->owned.map) {
1425678453a8Sspeer 			rx_rbr_ring_t *ring =
1426678453a8Sspeer 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
1427678453a8Sspeer 			if (ring) {
1428678453a8Sspeer 				if (channel == ring->rdc) {
1429678453a8Sspeer 					NXGE_DEBUG_MSG((nxgep, RX_CTL,
1430678453a8Sspeer 					    "==> nxge_rxdma_get_rbr_ring: "
1431678453a8Sspeer 					    "channel %d ring $%p", rdc, ring));
1432678453a8Sspeer 					return (ring);
1433678453a8Sspeer 				}
1434678453a8Sspeer 			}
143544961713Sgirish 		}
143644961713Sgirish 	}
143744961713Sgirish 
143844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
143952ccf843Smisaki 	    "<== nxge_rxdma_get_rbr_ring: not found"));
144044961713Sgirish 
144144961713Sgirish 	return (NULL);
144244961713Sgirish }
144344961713Sgirish 
144444961713Sgirish p_rx_rcr_ring_t
144544961713Sgirish nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
144644961713Sgirish {
1447678453a8Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
1448678453a8Sspeer 	nxge_channel_t rdc;
144944961713Sgirish 
145044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
145152ccf843Smisaki 	    "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
145244961713Sgirish 
1453678453a8Sspeer 	if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
1454678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1455678453a8Sspeer 		    "<== nxge_rxdma_get_rcr_ring: "
1456678453a8Sspeer 		    "NULL ring pointer(s)"));
145744961713Sgirish 		return (NULL);
145844961713Sgirish 	}
1459678453a8Sspeer 
1460678453a8Sspeer 	if (set->owned.map == 0) {
146144961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
1462678453a8Sspeer 		    "<== nxge_rxdma_get_rcr_ring: no channels"));
146344961713Sgirish 		return (NULL);
146444961713Sgirish 	}
146544961713Sgirish 
1466678453a8Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1467678453a8Sspeer 		if ((1 << rdc) & set->owned.map) {
1468678453a8Sspeer 			rx_rcr_ring_t *ring =
1469678453a8Sspeer 			    nxgep->rx_rcr_rings->rcr_rings[rdc];
1470678453a8Sspeer 			if (ring) {
1471678453a8Sspeer 				if (channel == ring->rdc) {
1472678453a8Sspeer 					NXGE_DEBUG_MSG((nxgep, RX_CTL,
1473678453a8Sspeer 					    "==> nxge_rxdma_get_rcr_ring: "
1474678453a8Sspeer 					    "channel %d ring $%p", rdc, ring));
1475678453a8Sspeer 					return (ring);
1476678453a8Sspeer 				}
1477678453a8Sspeer 			}
147844961713Sgirish 		}
147944961713Sgirish 	}
148044961713Sgirish 
148144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
148252ccf843Smisaki 	    "<== nxge_rxdma_get_rcr_ring: not found"));
148344961713Sgirish 
148444961713Sgirish 	return (NULL);
148544961713Sgirish }
148644961713Sgirish 
148744961713Sgirish /*
148844961713Sgirish  * Static functions start here.
148944961713Sgirish  */
149044961713Sgirish static p_rx_msg_t
149144961713Sgirish nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
149244961713Sgirish {
1493*86ef0a63SRichard Lowe 	p_rx_msg_t nxge_mp		= NULL;
149444961713Sgirish 	p_nxge_dma_common_t		dmamsg_p;
1495*86ef0a63SRichard Lowe 	uchar_t				*buffer;
149644961713Sgirish 
149744961713Sgirish 	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
149844961713Sgirish 	if (nxge_mp == NULL) {
149956d930aeSspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
150052ccf843Smisaki 		    "Allocation of a rx msg failed."));
150144961713Sgirish 		goto nxge_allocb_exit;
150244961713Sgirish 	}
150344961713Sgirish 
150444961713Sgirish 	nxge_mp->use_buf_pool = B_FALSE;
150544961713Sgirish 	if (dmabuf_p) {
150644961713Sgirish 		nxge_mp->use_buf_pool = B_TRUE;
150744961713Sgirish 		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
150844961713Sgirish 		*dmamsg_p = *dmabuf_p;
150944961713Sgirish 		dmamsg_p->nblocks = 1;
151044961713Sgirish 		dmamsg_p->block_size = size;
151144961713Sgirish 		dmamsg_p->alength = size;
151244961713Sgirish 		buffer = (uchar_t *)dmabuf_p->kaddrp;
151344961713Sgirish 
151444961713Sgirish 		dmabuf_p->kaddrp = (void *)
151552ccf843Smisaki 		    ((char *)dmabuf_p->kaddrp + size);
151644961713Sgirish 		dmabuf_p->ioaddr_pp = (void *)
151752ccf843Smisaki 		    ((char *)dmabuf_p->ioaddr_pp + size);
151844961713Sgirish 		dmabuf_p->alength -= size;
151944961713Sgirish 		dmabuf_p->offset += size;
152044961713Sgirish 		dmabuf_p->dma_cookie.dmac_laddress += size;
152144961713Sgirish 		dmabuf_p->dma_cookie.dmac_size -= size;
152244961713Sgirish 
152344961713Sgirish 	} else {
152444961713Sgirish 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
152544961713Sgirish 		if (buffer == NULL) {
152656d930aeSspeer 			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
152752ccf843Smisaki 			    "Allocation of a receive page failed."));
152844961713Sgirish 			goto nxge_allocb_fail1;
152944961713Sgirish 		}
153044961713Sgirish 	}
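
	/*
	 * Carving sketch (illustrative only, not driver code): when a shared
	 * DMA chunk is supplied, each call above slices "size" bytes off its
	 * front by advancing the mapped addresses and shrinking the remaining
	 * length.  With a hypothetical 8 KB chunk and 2 KB buffers:
	 *
	 *	1st call: kaddrp/ioaddr_pp advance by 2048, alength 8192->6144
	 *	2nd call: kaddrp/ioaddr_pp advance by 2048, alength 6144->4096
	 *	...until alength is exhausted and a new chunk must be used.
	 */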
153144961713Sgirish 
153244961713Sgirish 	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
153344961713Sgirish 	if (nxge_mp->rx_mblk_p == NULL) {
153456d930aeSspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
153544961713Sgirish 		goto nxge_allocb_fail2;
153644961713Sgirish 	}
153744961713Sgirish 
153844961713Sgirish 	nxge_mp->buffer = buffer;
153944961713Sgirish 	nxge_mp->block_size = size;
154044961713Sgirish 	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
154144961713Sgirish 	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
154244961713Sgirish 	nxge_mp->ref_cnt = 1;
154344961713Sgirish 	nxge_mp->free = B_TRUE;
154444961713Sgirish 	nxge_mp->rx_use_bcopy = B_FALSE;
154544961713Sgirish 
154614ea4bb7Ssd 	atomic_inc_32(&nxge_mblks_pending);
154744961713Sgirish 
154844961713Sgirish 	goto nxge_allocb_exit;
154944961713Sgirish 
155044961713Sgirish nxge_allocb_fail2:
155144961713Sgirish 	if (!nxge_mp->use_buf_pool) {
155244961713Sgirish 		KMEM_FREE(buffer, size);
155344961713Sgirish 	}
155444961713Sgirish 
155544961713Sgirish nxge_allocb_fail1:
155644961713Sgirish 	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
155744961713Sgirish 	nxge_mp = NULL;
155844961713Sgirish 
155944961713Sgirish nxge_allocb_exit:
156044961713Sgirish 	return (nxge_mp);
156144961713Sgirish }
156244961713Sgirish 
156344961713Sgirish p_mblk_t
156444961713Sgirish nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
156544961713Sgirish {
156644961713Sgirish 	p_mblk_t mp;
156744961713Sgirish 
156844961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
156944961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
157052ccf843Smisaki 	    "offset = 0x%08X "
157152ccf843Smisaki 	    "size = 0x%08X",
157252ccf843Smisaki 	    nxge_mp, offset, size));
157344961713Sgirish 
157444961713Sgirish 	mp = desballoc(&nxge_mp->buffer[offset], size,
157552ccf843Smisaki 	    0, &nxge_mp->freeb);
157644961713Sgirish 	if (mp == NULL) {
157744961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
157844961713Sgirish 		goto nxge_dupb_exit;
157944961713Sgirish 	}
158044961713Sgirish 	atomic_inc_32(&nxge_mp->ref_cnt);
158144961713Sgirish 
158244961713Sgirish 
158344961713Sgirish nxge_dupb_exit:
158444961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
158552ccf843Smisaki 	    nxge_mp));
158644961713Sgirish 	return (mp);
158744961713Sgirish }
158844961713Sgirish 
158944961713Sgirish p_mblk_t
159044961713Sgirish nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
159144961713Sgirish {
159244961713Sgirish 	p_mblk_t mp;
159344961713Sgirish 	uchar_t *dp;
159444961713Sgirish 
159544961713Sgirish 	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
159644961713Sgirish 	if (mp == NULL) {
159744961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
159844961713Sgirish 		goto nxge_dupb_bcopy_exit;
159944961713Sgirish 	}
160044961713Sgirish 	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
160144961713Sgirish 	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
160244961713Sgirish 	mp->b_wptr = dp + size;
160344961713Sgirish 
160444961713Sgirish nxge_dupb_bcopy_exit:
160544961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
160652ccf843Smisaki 	    nxge_mp));
160744961713Sgirish 	return (mp);
160844961713Sgirish }
160944961713Sgirish 
161044961713Sgirish void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
161144961713Sgirish 	p_rx_msg_t rx_msg_p);
161244961713Sgirish 
161344961713Sgirish void
161444961713Sgirish nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
161544961713Sgirish {
161644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
161744961713Sgirish 
161844961713Sgirish 	/* Reuse this buffer */
161944961713Sgirish 	rx_msg_p->free = B_FALSE;
162044961713Sgirish 	rx_msg_p->cur_usage_cnt = 0;
162144961713Sgirish 	rx_msg_p->max_usage_cnt = 0;
162244961713Sgirish 	rx_msg_p->pkt_buf_size = 0;
162344961713Sgirish 
162444961713Sgirish 	if (rx_rbr_p->rbr_use_bcopy) {
162544961713Sgirish 		rx_msg_p->rx_use_bcopy = B_FALSE;
162644961713Sgirish 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
162744961713Sgirish 	}
162844961713Sgirish 
162944961713Sgirish 	/*
163044961713Sgirish 	 * Get the rbr header pointer and its offset index.
163144961713Sgirish 	 */
163244961713Sgirish 	MUTEX_ENTER(&rx_rbr_p->post_lock);
163344961713Sgirish 	rx_rbr_p->rbr_wr_index =  ((rx_rbr_p->rbr_wr_index + 1) &
163452ccf843Smisaki 	    rx_rbr_p->rbr_wrap_mask);
163544961713Sgirish 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
163644961713Sgirish 	MUTEX_EXIT(&rx_rbr_p->post_lock);
163730ac2e7bSml 	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
163830ac2e7bSml 	    rx_rbr_p->rdc, 1);
163944961713Sgirish 
164044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
164152ccf843Smisaki 	    "<== nxge_post_page (channel %d post_next_index %d)",
164252ccf843Smisaki 	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
164344961713Sgirish 
164444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
164544961713Sgirish }
164644961713Sgirish 
164744961713Sgirish void
164844961713Sgirish nxge_freeb(p_rx_msg_t rx_msg_p)
164944961713Sgirish {
165044961713Sgirish 	size_t size;
165144961713Sgirish 	uchar_t *buffer = NULL;
165244961713Sgirish 	int ref_cnt;
1653958cea9eSml 	boolean_t free_state = B_FALSE;
165444961713Sgirish 
1655007969e0Stm 	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
1656007969e0Stm 
165744961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
165844961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
165952ccf843Smisaki 	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
166052ccf843Smisaki 	    rx_msg_p, nxge_mblks_pending));
166144961713Sgirish 
1662958cea9eSml 	/*
1663958cea9eSml 	 * First we need to get the free state, then
1664958cea9eSml 	 * atomically decrement the reference count, to prevent
1665958cea9eSml 	 * a race condition with the interrupt thread that
1666958cea9eSml 	 * is processing a loaned-up buffer block.
1667958cea9eSml 	 */
1668958cea9eSml 	free_state = rx_msg_p->free;
16691a5e258fSJosef 'Jeff' Sipek 	ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
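
	/*
	 * Ordering sketch (illustrative only): the repost decision at the
	 * bottom of this function uses the snapshot taken above, not a fresh
	 * read, so it reflects the state observed while this thread still
	 * held its reference:
	 *
	 *	free_state = rx_msg_p->free;			(snapshot first)
	 *	ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);	(then release)
	 *	if (free_state && ref_cnt == 1 && ring != NULL)
	 *		... repost via nxge_post_page() ...
	 *
	 * Reversing the two steps would allow the interrupt thread to change
	 * "free" between the decrement and the read.
	 */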
167044961713Sgirish 	if (!ref_cnt) {
167130ac2e7bSml 		atomic_dec_32(&nxge_mblks_pending);
167244961713Sgirish 		buffer = rx_msg_p->buffer;
167344961713Sgirish 		size = rx_msg_p->block_size;
167444961713Sgirish 		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
167552ccf843Smisaki 		    "will free: rx_msg_p = $%p (block pending %d)",
167652ccf843Smisaki 		    rx_msg_p, nxge_mblks_pending));
167744961713Sgirish 
167844961713Sgirish 		if (!rx_msg_p->use_buf_pool) {
167944961713Sgirish 			KMEM_FREE(buffer, size);
168044961713Sgirish 		}
168114ea4bb7Ssd 
168214ea4bb7Ssd 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1683007969e0Stm 
16843e82a89eSmisaki 		if (ring) {
16853e82a89eSmisaki 			/*
16863e82a89eSmisaki 			 * Decrement the receive buffer ring's reference
16873e82a89eSmisaki 			 * count, too.
16883e82a89eSmisaki 			 */
16893e82a89eSmisaki 			atomic_dec_32(&ring->rbr_ref_cnt);
1690007969e0Stm 
16913e82a89eSmisaki 			/*
1692678453a8Sspeer 			 * Free the receive buffer ring, if
16933e82a89eSmisaki 			 * 1. all the receive buffers have been freed
16943e82a89eSmisaki 			 * 2. and we are in the proper state (that is, the
16953e82a89eSmisaki 			 *    unmap has completed and we are not UNMAPPING).
16963e82a89eSmisaki 			 */
16973e82a89eSmisaki 			if (ring->rbr_ref_cnt == 0 &&
16983e82a89eSmisaki 			    ring->rbr_state == RBR_UNMAPPED) {
1699678453a8Sspeer 				/*
1700678453a8Sspeer 				 * Free receive data buffers,
1701678453a8Sspeer 				 * buffer index information
1702678453a8Sspeer 				 * (rxring_info) and
1703678453a8Sspeer 				 * the message block ring.
1704678453a8Sspeer 				 */
1705678453a8Sspeer 				NXGE_DEBUG_MSG((NULL, RX_CTL,
1706678453a8Sspeer 				    "nxge_freeb:rx_msg_p = $%p "
1707678453a8Sspeer 				    "(block pending %d) free buffers",
1708678453a8Sspeer 				    rx_msg_p, nxge_mblks_pending));
1709678453a8Sspeer 				nxge_rxdma_databuf_free(ring);
1710678453a8Sspeer 				if (ring->ring_info) {
1711678453a8Sspeer 					KMEM_FREE(ring->ring_info,
1712678453a8Sspeer 					    sizeof (rxring_info_t));
1713678453a8Sspeer 				}
1714678453a8Sspeer 
1715678453a8Sspeer 				if (ring->rx_msg_ring) {
1716678453a8Sspeer 					KMEM_FREE(ring->rx_msg_ring,
1717678453a8Sspeer 					    ring->tnblocks *
1718678453a8Sspeer 					    sizeof (p_rx_msg_t));
1719678453a8Sspeer 				}
17203e82a89eSmisaki 				KMEM_FREE(ring, sizeof (*ring));
17213e82a89eSmisaki 			}
1722007969e0Stm 		}
172314ea4bb7Ssd 		return;
172444961713Sgirish 	}
172544961713Sgirish 
172644961713Sgirish 	/*
172744961713Sgirish 	 * Repost buffer.
172844961713Sgirish 	 */
17293e82a89eSmisaki 	if (free_state && (ref_cnt == 1) && ring) {
173044961713Sgirish 		NXGE_DEBUG_MSG((NULL, RX_CTL,
173144961713Sgirish 		    "nxge_freeb: post page $%p:", rx_msg_p));
1732007969e0Stm 		if (ring->rbr_state == RBR_POSTING)
1733007969e0Stm 			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
173444961713Sgirish 	}
173544961713Sgirish 
173644961713Sgirish 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
173744961713Sgirish }
173844961713Sgirish 
173944961713Sgirish uint_t
1740e3d11eeeSToomas Soome nxge_rx_intr(char *arg1, char *arg2)
174144961713Sgirish {
174244961713Sgirish 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
174344961713Sgirish 	p_nxge_t		nxgep = (p_nxge_t)arg2;
174444961713Sgirish 	p_nxge_ldg_t		ldgp;
174544961713Sgirish 	uint8_t			channel;
174644961713Sgirish 	npi_handle_t		handle;
174744961713Sgirish 	rx_dma_ctl_stat_t	cs;
174863f531d1SSriharsha Basavapatna 	p_rx_rcr_ring_t		rcrp;
174948056c53SMichael Speer 	mblk_t			*mp = NULL;
175044961713Sgirish 
175144961713Sgirish 	if (ldvp == NULL) {
175244961713Sgirish 		NXGE_DEBUG_MSG((NULL, INT_CTL,
175352ccf843Smisaki 		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
175452ccf843Smisaki 		    nxgep, ldvp));
175544961713Sgirish 		return (DDI_INTR_CLAIMED);
175644961713Sgirish 	}
175744961713Sgirish 
175844961713Sgirish 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
175944961713Sgirish 		nxgep = ldvp->nxgep;
176044961713Sgirish 	}
17611d36aa9eSspeer 
17621d36aa9eSspeer 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
17631d36aa9eSspeer 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
17641d36aa9eSspeer 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
17651d36aa9eSspeer 		    "<== nxge_rx_intr: interface not started or initialized"));
17661d36aa9eSspeer 		return (DDI_INTR_CLAIMED);
17671d36aa9eSspeer 	}
17681d36aa9eSspeer 
176944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
177052ccf843Smisaki 	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
177152ccf843Smisaki 	    nxgep, ldvp));
177244961713Sgirish 
177344961713Sgirish 	/*
1774e759c33aSMichael Speer 	 * Get the PIO handle.
177544961713Sgirish 	 */
177644961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1777da14cebeSEric Cheng 
1778e759c33aSMichael Speer 	/*
1779e759c33aSMichael Speer 	 * Get the ring to enable us to process packets.
1780e759c33aSMichael Speer 	 */
178163f531d1SSriharsha Basavapatna 	rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];
1782da14cebeSEric Cheng 
1783da14cebeSEric Cheng 	/*
1784da14cebeSEric Cheng 	 * The RCR ring lock must be held when packets
1785da14cebeSEric Cheng 	 * are being processed and when the hardware registers are
1786da14cebeSEric Cheng 	 * being read or written, to prevent a race condition
1787da14cebeSEric Cheng 	 * among the interrupt thread, the polling thread and the
1788da14cebeSEric Cheng 	 * setting of the poll_flag (such a race can cause fatal
1789da14cebeSEric Cheng 	 * errors, e.g. the rcrincon bit being set).
1790da14cebeSEric Cheng 	 */
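	/*
	 * Lock-discipline sketch (illustrative only, not driver code): both
	 * this interrupt path and the polling path are assumed to follow the
	 * same pattern, so poll_flag and the RCR registers are never touched
	 * concurrently:
	 *
	 *	MUTEX_ENTER(&rcrp->lock);
	 *	... read RX_DMA_CTL_STAT, call nxge_rx_pkts(), ...
	 *	... test or change rcrp->poll_flag, arm/disarm the LDG ...
	 *	MUTEX_EXIT(&rcrp->lock);
	 */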
179163f531d1SSriharsha Basavapatna 	MUTEX_ENTER(&rcrp->lock);
1792da14cebeSEric Cheng 
179344961713Sgirish 	/*
179444961713Sgirish 	 * Get the control and status for this channel.
179544961713Sgirish 	 */
179644961713Sgirish 	channel = ldvp->channel;
179744961713Sgirish 	ldgp = ldvp->ldgp;
1798da14cebeSEric Cheng 
17990dc2366fSVenugopal Iyer 	if (!isLDOMguest(nxgep) && (!rcrp->started)) {
1800e759c33aSMichael Speer 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1801e759c33aSMichael Speer 		    "<== nxge_rx_intr: channel is not started"));
1802e759c33aSMichael Speer 
1803e759c33aSMichael Speer 		/*
1804e759c33aSMichael Speer 		 * We received an interrupt before the ring is started.
1805e759c33aSMichael Speer 		 */
1806e759c33aSMichael Speer 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1807e759c33aSMichael Speer 		    &cs.value);
1808e759c33aSMichael Speer 		cs.value &= RX_DMA_CTL_STAT_WR1C;
1809e759c33aSMichael Speer 		cs.bits.hdw.mex = 1;
1810e759c33aSMichael Speer 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1811e759c33aSMichael Speer 		    cs.value);
1812e759c33aSMichael Speer 
1813e759c33aSMichael Speer 		/*
1814e759c33aSMichael Speer 		 * Rearm this logical group if this is a single device
1815e759c33aSMichael Speer 		 * group.
1816e759c33aSMichael Speer 		 */
1817e759c33aSMichael Speer 		if (ldgp->nldvs == 1) {
1818e759c33aSMichael Speer 			if (isLDOMguest(nxgep)) {
1819e759c33aSMichael Speer 				nxge_hio_ldgimgn(nxgep, ldgp);
1820e759c33aSMichael Speer 			} else {
1821e759c33aSMichael Speer 				ldgimgm_t mgm;
1822e759c33aSMichael Speer 
1823e759c33aSMichael Speer 				mgm.value = 0;
1824e759c33aSMichael Speer 				mgm.bits.ldw.arm = 1;
1825e759c33aSMichael Speer 				mgm.bits.ldw.timer = ldgp->ldg_timer;
1826e759c33aSMichael Speer 
1827e759c33aSMichael Speer 				NXGE_REG_WR64(handle,
1828e759c33aSMichael Speer 				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1829e759c33aSMichael Speer 				    mgm.value);
1830e759c33aSMichael Speer 			}
1831da14cebeSEric Cheng 		}
183263f531d1SSriharsha Basavapatna 		MUTEX_EXIT(&rcrp->lock);
1833e759c33aSMichael Speer 		return (DDI_INTR_CLAIMED);
1834da14cebeSEric Cheng 	}
1835da14cebeSEric Cheng 
183663f531d1SSriharsha Basavapatna 	ASSERT(rcrp->ldgp == ldgp);
183763f531d1SSriharsha Basavapatna 	ASSERT(rcrp->ldvp == ldvp);
1838da14cebeSEric Cheng 
183944961713Sgirish 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
184044961713Sgirish 
184144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
184252ccf843Smisaki 	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
184352ccf843Smisaki 	    channel,
184452ccf843Smisaki 	    cs.value,
184552ccf843Smisaki 	    cs.bits.hdw.rcrto,
184652ccf843Smisaki 	    cs.bits.hdw.rcrthres));
184744961713Sgirish 
184863f531d1SSriharsha Basavapatna 	if (!rcrp->poll_flag) {
184963f531d1SSriharsha Basavapatna 		mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
185048056c53SMichael Speer 	}
185144961713Sgirish 
185244961713Sgirish 	/* error events. */
185344961713Sgirish 	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1854678453a8Sspeer 		(void) nxge_rx_err_evnts(nxgep, channel, cs);
185544961713Sgirish 	}
185644961713Sgirish 
185744961713Sgirish 	/*
185844961713Sgirish 	 * Enable the mailbox update interrupt if we want
185944961713Sgirish 	 * to use the mailbox. We probably don't need to use
186044961713Sgirish 	 * the mailbox as it only saves us one pio read.
186144961713Sgirish 	 * Also write 1 to rcrthres and rcrto to clear
186244961713Sgirish 	 * these two edge-triggered bits.
186344961713Sgirish 	 */
186444961713Sgirish 	cs.value &= RX_DMA_CTL_STAT_WR1C;
186563f531d1SSriharsha Basavapatna 	cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
186644961713Sgirish 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
186752ccf843Smisaki 	    cs.value);
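
	/*
	 * Write-1-to-clear sketch (illustrative only): the bits covered by
	 * RX_DMA_CTL_STAT_WR1C are cleared by writing 1 to them, so writing
	 * back the masked value we just read acknowledges exactly the events
	 * we observed:
	 *
	 *	cs.value &= RX_DMA_CTL_STAT_WR1C;	(keep only W1C bits)
	 *	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
	 *	    cs.value);
	 *
	 * E.g. if the read returned rcrto=1 and rcrthres=0, the write-back
	 * clears rcrto and leaves any later rcrthres assertion still pending.
	 */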
186844961713Sgirish 
186944961713Sgirish 	/*
1870da14cebeSEric Cheng 	 * If the polling mode is enabled, disable the interrupt.
187144961713Sgirish 	 */
187263f531d1SSriharsha Basavapatna 	if (rcrp->poll_flag) {
1873da14cebeSEric Cheng 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1874da14cebeSEric Cheng 		    "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
1875da14cebeSEric Cheng 		    "(disabling interrupts)", channel, ldgp, ldvp));
187663f531d1SSriharsha Basavapatna 
1877da14cebeSEric Cheng 		/*
1878da14cebeSEric Cheng 		 * Disarm this logical group if this is a single device
1879da14cebeSEric Cheng 		 * group.
1880da14cebeSEric Cheng 		 */
1881da14cebeSEric Cheng 		if (ldgp->nldvs == 1) {
188263f531d1SSriharsha Basavapatna 			if (isLDOMguest(nxgep)) {
188363f531d1SSriharsha Basavapatna 				ldgp->arm = B_FALSE;
188463f531d1SSriharsha Basavapatna 				nxge_hio_ldgimgn(nxgep, ldgp);
188563f531d1SSriharsha Basavapatna 			} else {
188663f531d1SSriharsha Basavapatna 				ldgimgm_t mgm;
188763f531d1SSriharsha Basavapatna 				mgm.value = 0;
188863f531d1SSriharsha Basavapatna 				mgm.bits.ldw.arm = 0;
188963f531d1SSriharsha Basavapatna 				NXGE_REG_WR64(handle,
189063f531d1SSriharsha Basavapatna 				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
189163f531d1SSriharsha Basavapatna 				    mgm.value);
189263f531d1SSriharsha Basavapatna 			}
1893da14cebeSEric Cheng 		}
1894da14cebeSEric Cheng 	} else {
1895da14cebeSEric Cheng 		/*
189608ac1c49SNicolas Droux 		 * Rearm this logical group if this is a single device
189708ac1c49SNicolas Droux 		 * group.
1898da14cebeSEric Cheng 		 */
1899da14cebeSEric Cheng 		if (ldgp->nldvs == 1) {
1900da14cebeSEric Cheng 			if (isLDOMguest(nxgep)) {
1901da14cebeSEric Cheng 				nxge_hio_ldgimgn(nxgep, ldgp);
1902da14cebeSEric Cheng 			} else {
1903da14cebeSEric Cheng 				ldgimgm_t mgm;
1904da14cebeSEric Cheng 
1905da14cebeSEric Cheng 				mgm.value = 0;
1906da14cebeSEric Cheng 				mgm.bits.ldw.arm = 1;
1907da14cebeSEric Cheng 				mgm.bits.ldw.timer = ldgp->ldg_timer;
1908da14cebeSEric Cheng 
1909da14cebeSEric Cheng 				NXGE_REG_WR64(handle,
1910da14cebeSEric Cheng 				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1911da14cebeSEric Cheng 				    mgm.value);
1912da14cebeSEric Cheng 			}
1913678453a8Sspeer 		}
1914da14cebeSEric Cheng 
1915da14cebeSEric Cheng 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1916da14cebeSEric Cheng 		    "==> nxge_rx_intr: rdc %d ldgp $%p "
1917da14cebeSEric Cheng 		    "exiting ISR (and call mac_rx_ring)", channel, ldgp));
191844961713Sgirish 	}
191963f531d1SSriharsha Basavapatna 	MUTEX_EXIT(&rcrp->lock);
192044961713Sgirish 
192148056c53SMichael Speer 	if (mp != NULL) {
192263f531d1SSriharsha Basavapatna 		mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
192363f531d1SSriharsha Basavapatna 		    rcrp->rcr_gen_num);
1924da14cebeSEric Cheng 	}
1925da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
1926da14cebeSEric Cheng 	return (DDI_INTR_CLAIMED);
192744961713Sgirish }
192844961713Sgirish 
192944961713Sgirish /*
193044961713Sgirish  * This routine is the main packet receive processing function.
193144961713Sgirish  * It gets the packet type, error code, and buffer related
193244961713Sgirish  * information from the receive completion entry.
193344961713Sgirish  * How many completion entries to process is based on the number of packets
193444961713Sgirish  * queued by the hardware, a hardware maintained tail pointer
193544961713Sgirish  * and a configurable receive packet count.
193644961713Sgirish  *
193744961713Sgirish  * A chain of message blocks will be created as a result of processing
193844961713Sgirish  * the completion entries. This chain of message blocks will be returned and
193944961713Sgirish  * a hardware control status register will be updated with the number of
194044961713Sgirish  * packets removed from the hardware queue.
194144961713Sgirish  *
1942da14cebeSEric Cheng  * The RCR ring lock is held when entering this function.
194344961713Sgirish  */
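/*
 * Illustrative usage sketch (not driver code): the interrupt path calls this
 * routine with no byte budget, while a polling caller is assumed to pass the
 * number of bytes it is willing to pick up in one pass:
 *
 *	mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);		(interrupt: no limit)
 *	mp = nxge_rx_pkts(nxgep, rcrp, cs, nbytes);	(poll: stop at nbytes)
 *
 * Either way the returned mblk chain is handed to the MAC layer by the
 * caller; see the mac_rx_ring() call at the end of nxge_rx_intr().
 */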
1944678453a8Sspeer static mblk_t *
1945678453a8Sspeer nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
1946678453a8Sspeer     int bytes_to_pickup)
194744961713Sgirish {
194844961713Sgirish 	npi_handle_t		handle;
194944961713Sgirish 	uint8_t			channel;
195044961713Sgirish 	uint32_t		comp_rd_index;
195144961713Sgirish 	p_rcr_entry_t		rcr_desc_rd_head_p;
195244961713Sgirish 	p_rcr_entry_t		rcr_desc_rd_head_pp;
195344961713Sgirish 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
195444961713Sgirish 	uint16_t		qlen, nrcr_read, npkt_read;
1955678453a8Sspeer 	uint32_t		qlen_hw;
195644961713Sgirish 	boolean_t		multi;
1957678453a8Sspeer 	rcrcfig_b_t		rcr_cfg_b;
1958678453a8Sspeer 	int			totallen = 0;
1959a3c5bd6dSspeer #if defined(_BIG_ENDIAN)
196044961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
196144961713Sgirish #endif
196244961713Sgirish 
1963da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
196452ccf843Smisaki 	    "channel %d", rcr_p->rdc));
196544961713Sgirish 
196644961713Sgirish 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
196744961713Sgirish 		return (NULL);
196844961713Sgirish 	}
196944961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
197044961713Sgirish 	channel = rcr_p->rdc;
197144961713Sgirish 
197244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
197352ccf843Smisaki 	    "==> nxge_rx_pkts: START: rcr channel %d "
197452ccf843Smisaki 	    "head_p $%p head_pp $%p  index %d ",
197552ccf843Smisaki 	    channel, rcr_p->rcr_desc_rd_head_p,
197652ccf843Smisaki 	    rcr_p->rcr_desc_rd_head_pp,
197752ccf843Smisaki 	    rcr_p->comp_rd_index));
197844961713Sgirish 
197944961713Sgirish 
1980a3c5bd6dSspeer #if !defined(_BIG_ENDIAN)
198144961713Sgirish 	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
198244961713Sgirish #else
198344961713Sgirish 	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
198444961713Sgirish 	if (rs != NPI_SUCCESS) {
1985678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
198644961713Sgirish 		    "channel %d, get qlen failed 0x%08x",
198752ccf843Smisaki 		    channel, rs));
198844961713Sgirish 		return (NULL);
198944961713Sgirish 	}
199044961713Sgirish #endif
199144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
199252ccf843Smisaki 	    "qlen %d", channel, qlen));
199344961713Sgirish 
199444961713Sgirish 
199544961713Sgirish 
199644961713Sgirish 	if (!qlen) {
1997da14cebeSEric Cheng 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
199852ccf843Smisaki 		    "==> nxge_rx_pkts:rcr channel %d "
199952ccf843Smisaki 		    "qlen %d (no pkts)", channel, qlen));
200044961713Sgirish 
200144961713Sgirish 		return (NULL);
200244961713Sgirish 	}
200344961713Sgirish 
200444961713Sgirish 	comp_rd_index = rcr_p->comp_rd_index;
200544961713Sgirish 
200644961713Sgirish 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
200744961713Sgirish 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
200844961713Sgirish 	nrcr_read = npkt_read = 0;
200944961713Sgirish 
201044961713Sgirish 	/*
201144961713Sgirish 	 * Number of packets queued
201244961713Sgirish 	 * (A jumbo or multi-segment packet will be counted as only one
201344961713Sgirish 	 *  packet, though it may take up more than one completion entry).
201444961713Sgirish 	 */
201544961713Sgirish 	qlen_hw = (qlen < nxge_max_rx_pkts) ?
201652ccf843Smisaki 	    qlen : nxge_max_rx_pkts;
201744961713Sgirish 	head_mp = NULL;
201844961713Sgirish 	tail_mp = &head_mp;
201944961713Sgirish 	nmp = mp_cont = NULL;
202044961713Sgirish 	multi = B_FALSE;
202144961713Sgirish 
2022a3c5bd6dSspeer 	while (qlen_hw) {
202344961713Sgirish 
202444961713Sgirish #ifdef NXGE_DEBUG
202544961713Sgirish 		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
202644961713Sgirish #endif
202744961713Sgirish 		/*
202844961713Sgirish 		 * Process one completion ring entry.
202944961713Sgirish 		 */
203044961713Sgirish 		nxge_receive_packet(nxgep,
203152ccf843Smisaki 		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
203244961713Sgirish 
203344961713Sgirish 		/*
203444961713Sgirish 		 * message chaining modes
203544961713Sgirish 		 */
203614ea4bb7Ssd 		if (nmp) {
203744961713Sgirish 			nmp->b_next = NULL;
203814ea4bb7Ssd 			if (!multi && !mp_cont) { /* frame fits a partition */
203914ea4bb7Ssd 				*tail_mp = nmp;
204014ea4bb7Ssd 				tail_mp = &nmp->b_next;
2041678453a8Sspeer 				totallen += MBLKL(nmp);
204214ea4bb7Ssd 				nmp = NULL;
204314ea4bb7Ssd 			} else if (multi && !mp_cont) { /* first segment */
204414ea4bb7Ssd 				*tail_mp = nmp;
204514ea4bb7Ssd 				tail_mp = &nmp->b_cont;
2046678453a8Sspeer 				totallen += MBLKL(nmp);
204714ea4bb7Ssd 			} else if (multi && mp_cont) {	/* mid of multi segs */
204814ea4bb7Ssd 				*tail_mp = mp_cont;
204914ea4bb7Ssd 				tail_mp = &mp_cont->b_cont;
2050678453a8Sspeer 				totallen += MBLKL(mp_cont);
205114ea4bb7Ssd 			} else if (!multi && mp_cont) { /* last segment */
2052a3c5bd6dSspeer 				*tail_mp = mp_cont;
205314ea4bb7Ssd 				tail_mp = &nmp->b_next;
2054678453a8Sspeer 				totallen += MBLKL(mp_cont);
205514ea4bb7Ssd 				nmp = NULL;
205614ea4bb7Ssd 			}
205744961713Sgirish 		}
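
		/*
		 * Chain-shape sketch (illustrative only): completed frames
		 * are linked through b_next, segments of one multi-buffer
		 * frame through b_cont.  Two single-buffer frames followed
		 * by a two-segment frame leave head_mp looking like:
		 *
		 *	pkt1 -b_next-> pkt2 -b_next-> seg1 -b_cont-> seg2
		 */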
205844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
205952ccf843Smisaki 		    "==> nxge_rx_pkts: loop: rcr channel %d "
206052ccf843Smisaki 		    "before updating: multi %d "
206152ccf843Smisaki 		    "nrcr_read %d "
206252ccf843Smisaki 		    "npk read %d "
206352ccf843Smisaki 		    "head_pp $%p  index %d ",
206452ccf843Smisaki 		    channel,
206552ccf843Smisaki 		    multi,
206652ccf843Smisaki 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
206752ccf843Smisaki 		    comp_rd_index));
206844961713Sgirish 
206944961713Sgirish 		if (!multi) {
207044961713Sgirish 			qlen_hw--;
207144961713Sgirish 			npkt_read++;
207244961713Sgirish 		}
207344961713Sgirish 
207444961713Sgirish 		/*
207544961713Sgirish 		 * Update the next read entry.
207644961713Sgirish 		 */
207744961713Sgirish 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
207852ccf843Smisaki 		    rcr_p->comp_wrap_mask);
207944961713Sgirish 
208044961713Sgirish 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
208152ccf843Smisaki 		    rcr_p->rcr_desc_first_p,
208252ccf843Smisaki 		    rcr_p->rcr_desc_last_p);
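
		/*
		 * Index-wrap sketch (illustrative only): NEXT_ENTRY() is a
		 * masked increment, so with a hypothetical wrap mask of
		 * 0x1ff:
		 *
		 *	(510 + 1) & 0x1ff == 511
		 *	(511 + 1) & 0x1ff == 0		(wraps to the start)
		 *
		 * NEXT_ENTRY_PTR() wraps the descriptor pointer the same
		 * way, back to rcr_desc_first_p after rcr_desc_last_p.
		 */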
208344961713Sgirish 
208444961713Sgirish 		nrcr_read++;
208544961713Sgirish 
208644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
208752ccf843Smisaki 		    "<== nxge_rx_pkts: (SAM, process one packet) "
208852ccf843Smisaki 		    "nrcr_read %d",
208952ccf843Smisaki 		    nrcr_read));
209044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
209152ccf843Smisaki 		    "==> nxge_rx_pkts: loop: rcr channel %d "
209252ccf843Smisaki 		    "multi %d "
209352ccf843Smisaki 		    "nrcr_read %d "
209452ccf843Smisaki 		    "npk read %d "
209552ccf843Smisaki 		    "head_pp $%p  index %d ",
209652ccf843Smisaki 		    channel,
209752ccf843Smisaki 		    multi,
209852ccf843Smisaki 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
209952ccf843Smisaki 		    comp_rd_index));
210044961713Sgirish 
2101678453a8Sspeer 		if ((bytes_to_pickup != -1) &&
2102678453a8Sspeer 		    (totallen >= bytes_to_pickup)) {
2103678453a8Sspeer 			break;
2104678453a8Sspeer 		}
210544961713Sgirish 	}
210644961713Sgirish 
210744961713Sgirish 	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
210844961713Sgirish 	rcr_p->comp_rd_index = comp_rd_index;
210944961713Sgirish 	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
211014ea4bb7Ssd 	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
211152ccf843Smisaki 	    (nxgep->intr_threshold != rcr_p->intr_threshold)) {
21127b26d9ffSSantwona Behera 
21137b26d9ffSSantwona Behera 		rcr_p->intr_timeout = (nxgep->intr_timeout <
21147b26d9ffSSantwona Behera 		    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
21157b26d9ffSSantwona Behera 		    nxgep->intr_timeout;
21167b26d9ffSSantwona Behera 
21177b26d9ffSSantwona Behera 		rcr_p->intr_threshold = (nxgep->intr_threshold <
21187b26d9ffSSantwona Behera 		    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
21197b26d9ffSSantwona Behera 		    nxgep->intr_threshold;
21207b26d9ffSSantwona Behera 
212114ea4bb7Ssd 		rcr_cfg_b.value = 0x0ULL;
21227b26d9ffSSantwona Behera 		rcr_cfg_b.bits.ldw.entout = 1;
212314ea4bb7Ssd 		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
212414ea4bb7Ssd 		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
21257b26d9ffSSantwona Behera 
212614ea4bb7Ssd 		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
212752ccf843Smisaki 		    channel, rcr_cfg_b.value);
212814ea4bb7Ssd 	}
212944961713Sgirish 
213044961713Sgirish 	cs.bits.ldw.pktread = npkt_read;
213144961713Sgirish 	cs.bits.ldw.ptrread = nrcr_read;
213244961713Sgirish 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
213352ccf843Smisaki 	    channel, cs.value);
213444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
213552ccf843Smisaki 	    "==> nxge_rx_pkts: EXIT: rcr channel %d "
213652ccf843Smisaki 	    "head_pp $%p  index %016llx ",
213752ccf843Smisaki 	    channel,
213852ccf843Smisaki 	    rcr_p->rcr_desc_rd_head_pp,
213952ccf843Smisaki 	    rcr_p->comp_rd_index));
214044961713Sgirish 	/*
214144961713Sgirish 	 * Update RCR buffer pointer read and number of packets
214244961713Sgirish 	 * read.
214344961713Sgirish 	 */
214444961713Sgirish 
2145da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return"
2146da14cebeSEric Cheng 	    "channel %d", rcr_p->rdc));
2147da14cebeSEric Cheng 
214844961713Sgirish 	return (head_mp);
214944961713Sgirish }
215044961713Sgirish 
215144961713Sgirish void
215244961713Sgirish nxge_receive_packet(p_nxge_t nxgep,
215344961713Sgirish     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
215444961713Sgirish     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
215544961713Sgirish {
215644961713Sgirish 	p_mblk_t		nmp = NULL;
215744961713Sgirish 	uint64_t		multi;
215844961713Sgirish 	uint64_t		dcf_err;
215944961713Sgirish 	uint8_t			channel;
216044961713Sgirish 
216144961713Sgirish 	boolean_t		first_entry = B_TRUE;
216244961713Sgirish 	boolean_t		is_tcp_udp = B_FALSE;
216344961713Sgirish 	boolean_t		buffer_free = B_FALSE;
216444961713Sgirish 	boolean_t		error_send_up = B_FALSE;
216544961713Sgirish 	uint8_t			error_type;
216644961713Sgirish 	uint16_t		l2_len;
216744961713Sgirish 	uint16_t		skip_len;
216844961713Sgirish 	uint8_t			pktbufsz_type;
216944961713Sgirish 	uint64_t		rcr_entry;
217044961713Sgirish 	uint64_t		*pkt_buf_addr_pp;
217144961713Sgirish 	uint64_t		*pkt_buf_addr_p;
217244961713Sgirish 	uint32_t		buf_offset;
217344961713Sgirish 	uint32_t		bsize;
217444961713Sgirish 	uint32_t		error_disp_cnt;
217544961713Sgirish 	uint32_t		msg_index;
217644961713Sgirish 	p_rx_rbr_ring_t		rx_rbr_p;
2177*86ef0a63SRichard Lowe 	p_rx_msg_t		*rx_msg_ring_p;
217844961713Sgirish 	p_rx_msg_t		rx_msg_p;
217944961713Sgirish 	uint16_t		sw_offset_bytes = 0, hdr_size = 0;
218044961713Sgirish 	nxge_status_t		status = NXGE_OK;
218144961713Sgirish 	boolean_t		is_valid = B_FALSE;
218244961713Sgirish 	p_nxge_rx_ring_stats_t	rdc_stats;
2183a3c5bd6dSspeer 	uint32_t		bytes_read;
2184a3c5bd6dSspeer 	uint64_t		pkt_type;
2185a3c5bd6dSspeer 	uint64_t		frag;
21864202ea4bSsbehera 	boolean_t		pkt_too_long_err = B_FALSE;
218744961713Sgirish #ifdef	NXGE_DEBUG
218844961713Sgirish 	int			dump_len;
218944961713Sgirish #endif
219044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
219144961713Sgirish 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
219244961713Sgirish 
219344961713Sgirish 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
219444961713Sgirish 
219544961713Sgirish 	multi = (rcr_entry & RCR_MULTI_MASK);
219644961713Sgirish 	dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
219744961713Sgirish 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
219844961713Sgirish 
219944961713Sgirish 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
220044961713Sgirish 	frag = (rcr_entry & RCR_FRAG_MASK);
220144961713Sgirish 
220244961713Sgirish 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
220344961713Sgirish 
220444961713Sgirish 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
220552ccf843Smisaki 	    RCR_PKTBUFSZ_SHIFT);
220644961713Sgirish 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
220752ccf843Smisaki 	    RCR_PKT_BUF_ADDR_SHIFT);
220844961713Sgirish 
220944961713Sgirish 	channel = rcr_p->rdc;
221044961713Sgirish 
221144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
221252ccf843Smisaki 	    "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
221352ccf843Smisaki 	    "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
221452ccf843Smisaki 	    "error_type 0x%x pkt_type 0x%x  "
221552ccf843Smisaki 	    "pktbufsz_type %d ",
221652ccf843Smisaki 	    rcr_desc_rd_head_p,
221752ccf843Smisaki 	    rcr_entry, pkt_buf_addr_pp, l2_len,
221852ccf843Smisaki 	    multi,
221952ccf843Smisaki 	    error_type,
222052ccf843Smisaki 	    pkt_type,
222152ccf843Smisaki 	    pktbufsz_type));
222244961713Sgirish 
222344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
222452ccf843Smisaki 	    "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
222552ccf843Smisaki 	    "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
222652ccf843Smisaki 	    "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
222752ccf843Smisaki 	    rcr_entry, pkt_buf_addr_pp, l2_len,
222852ccf843Smisaki 	    multi,
222952ccf843Smisaki 	    error_type,
223052ccf843Smisaki 	    pkt_type));
223144961713Sgirish 
223244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
223352ccf843Smisaki 	    "==> (rbr) nxge_receive_packet: entry 0x%0llx "
223452ccf843Smisaki 	    "full pkt_buf_addr_pp $%p l2_len %d",
223552ccf843Smisaki 	    rcr_entry, pkt_buf_addr_pp, l2_len));
223644961713Sgirish 
223744961713Sgirish 	/* get the stats ptr */
223844961713Sgirish 	rdc_stats = rcr_p->rdc_stats;
223944961713Sgirish 
224044961713Sgirish 	if (!l2_len) {
224144961713Sgirish 
224244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
224352ccf843Smisaki 		    "<== nxge_receive_packet: failed: l2 length is 0."));
224444961713Sgirish 		return;
224544961713Sgirish 	}
224644961713Sgirish 
22474202ea4bSsbehera 	/*
2248da14cebeSEric Cheng 	 * Software workaround for a BMAC hardware limitation that allows
22494202ea4bSsbehera 	 * a maxframe size of 1526 instead of 1522 for non-jumbo, and 0x2406
22504202ea4bSsbehera 	 * instead of 0x2400 for jumbo.
22514202ea4bSsbehera 	 */
22524202ea4bSsbehera 	if (l2_len > nxgep->mac.maxframesize) {
22534202ea4bSsbehera 		pkt_too_long_err = B_TRUE;
22544202ea4bSsbehera 	}
22554202ea4bSsbehera 
225656d930aeSspeer 	/* Hardware sends us 4 bytes of CRC as no stripping is done.  */
225756d930aeSspeer 	l2_len -= ETHERFCSL;
225856d930aeSspeer 
225944961713Sgirish 	/* shift 6 bits to get the full io address */
226044961713Sgirish 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
226152ccf843Smisaki 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
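	/*
	 * Address-reconstruction sketch (illustrative only): the completion
	 * entry stores the buffer address with its low 6 bits dropped, so
	 * shifting left by RCR_PKT_BUF_ADDR_SHIFT_FULL (6) restores the full
	 * I/O address.  For a hypothetical packed value:
	 *
	 *	packed = 0x0123456;
	 *	full   = packed << 6;		(== 0x048d1580)
	 */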
226244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
226352ccf843Smisaki 	    "==> (rbr) nxge_receive_packet: entry 0x%0llx "
226452ccf843Smisaki 	    "full pkt_buf_addr_pp $%p l2_len %d",
226552ccf843Smisaki 	    rcr_entry, pkt_buf_addr_pp, l2_len));
226644961713Sgirish 
226744961713Sgirish 	rx_rbr_p = rcr_p->rx_rbr_p;
226844961713Sgirish 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
226944961713Sgirish 
227044961713Sgirish 	if (first_entry) {
227144961713Sgirish 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
227252ccf843Smisaki 		    RXDMA_HDR_SIZE_DEFAULT);
227344961713Sgirish 
227444961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
227552ccf843Smisaki 		    "==> nxge_receive_packet: first entry 0x%016llx "
227652ccf843Smisaki 		    "pkt_buf_addr_pp $%p l2_len %d hdr %d",
227752ccf843Smisaki 		    rcr_entry, pkt_buf_addr_pp, l2_len,
227852ccf843Smisaki 		    hdr_size));
227944961713Sgirish 	}
228044961713Sgirish 
228144961713Sgirish 	MUTEX_ENTER(&rx_rbr_p->lock);
228244961713Sgirish 
228344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
228452ccf843Smisaki 	    "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
228552ccf843Smisaki 	    "full pkt_buf_addr_pp $%p l2_len %d",
228652ccf843Smisaki 	    rcr_entry, pkt_buf_addr_pp, l2_len));
228744961713Sgirish 
228844961713Sgirish 	/*
228944961713Sgirish 	 * Packet buffer address in the completion entry points
229044961713Sgirish 	 * to the starting buffer address (offset 0).
229144961713Sgirish 	 * Use the starting buffer address to locate the corresponding
229244961713Sgirish 	 * kernel address.
229344961713Sgirish 	 */
229444961713Sgirish 	status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
229552ccf843Smisaki 	    pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
229652ccf843Smisaki 	    &buf_offset,
229752ccf843Smisaki 	    &msg_index);
229844961713Sgirish 
229944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
230052ccf843Smisaki 	    "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
230152ccf843Smisaki 	    "full pkt_buf_addr_pp $%p l2_len %d",
230252ccf843Smisaki 	    rcr_entry, pkt_buf_addr_pp, l2_len));
230344961713Sgirish 
230444961713Sgirish 	if (status != NXGE_OK) {
230544961713Sgirish 		MUTEX_EXIT(&rx_rbr_p->lock);
230644961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
230752ccf843Smisaki 		    "<== nxge_receive_packet: found vaddr failed %d",
230852ccf843Smisaki 		    status));
230944961713Sgirish 		return;
231044961713Sgirish 	}
231144961713Sgirish 
231244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
231352ccf843Smisaki 	    "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
231452ccf843Smisaki 	    "full pkt_buf_addr_pp $%p l2_len %d",
231552ccf843Smisaki 	    rcr_entry, pkt_buf_addr_pp, l2_len));
231644961713Sgirish 
231744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
231852ccf843Smisaki 	    "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
231952ccf843Smisaki 	    "full pkt_buf_addr_pp $%p l2_len %d",
232052ccf843Smisaki 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
232144961713Sgirish 
232244961713Sgirish 	rx_msg_p = rx_msg_ring_p[msg_index];
232344961713Sgirish 
232444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
232552ccf843Smisaki 	    "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
232652ccf843Smisaki 	    "full pkt_buf_addr_pp $%p l2_len %d",
232752ccf843Smisaki 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
232844961713Sgirish 
232944961713Sgirish 	switch (pktbufsz_type) {
233044961713Sgirish 	case RCR_PKTBUFSZ_0:
233144961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
233244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
233352ccf843Smisaki 		    "==> nxge_receive_packet: 0 buf %d", bsize));
233444961713Sgirish 		break;
233544961713Sgirish 	case RCR_PKTBUFSZ_1:
233644961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
233744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
233852ccf843Smisaki 		    "==> nxge_receive_packet: 1 buf %d", bsize));
233944961713Sgirish 		break;
234044961713Sgirish 	case RCR_PKTBUFSZ_2:
234144961713Sgirish 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
234244961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
234352ccf843Smisaki 		    "==> nxge_receive_packet: 2 buf %d", bsize));
234444961713Sgirish 		break;
234544961713Sgirish 	case RCR_SINGLE_BLOCK:
234644961713Sgirish 		bsize = rx_msg_p->block_size;
234744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
234852ccf843Smisaki 		    "==> nxge_receive_packet: single %d", bsize));
234944961713Sgirish 
235044961713Sgirish 		break;
235144961713Sgirish 	default:
235244961713Sgirish 		MUTEX_EXIT(&rx_rbr_p->lock);
235344961713Sgirish 		return;
235444961713Sgirish 	}
235544961713Sgirish 
23564df55fdeSJanie Lu 	switch (nxge_rdc_buf_offset) {
23574df55fdeSJanie Lu 	case SW_OFFSET_NO_OFFSET:
23584df55fdeSJanie Lu 		sw_offset_bytes = 0;
23594df55fdeSJanie Lu 		break;
23604df55fdeSJanie Lu 	case SW_OFFSET_64:
23614df55fdeSJanie Lu 		sw_offset_bytes = 64;
23624df55fdeSJanie Lu 		break;
23634df55fdeSJanie Lu 	case SW_OFFSET_128:
23644df55fdeSJanie Lu 		sw_offset_bytes = 128;
23654df55fdeSJanie Lu 		break;
23664df55fdeSJanie Lu 	case SW_OFFSET_192:
23674df55fdeSJanie Lu 		sw_offset_bytes = 192;
23684df55fdeSJanie Lu 		break;
23694df55fdeSJanie Lu 	case SW_OFFSET_256:
23704df55fdeSJanie Lu 		sw_offset_bytes = 256;
23714df55fdeSJanie Lu 		break;
23724df55fdeSJanie Lu 	case SW_OFFSET_320:
23734df55fdeSJanie Lu 		sw_offset_bytes = 320;
23744df55fdeSJanie Lu 		break;
23754df55fdeSJanie Lu 	case SW_OFFSET_384:
23764df55fdeSJanie Lu 		sw_offset_bytes = 384;
23774df55fdeSJanie Lu 		break;
23784df55fdeSJanie Lu 	case SW_OFFSET_448:
23794df55fdeSJanie Lu 		sw_offset_bytes = 448;
23804df55fdeSJanie Lu 		break;
23814df55fdeSJanie Lu 	default:
23824df55fdeSJanie Lu 		sw_offset_bytes = 0;
23834df55fdeSJanie Lu 		break;
23844df55fdeSJanie Lu 	}
23854df55fdeSJanie Lu 
238644961713Sgirish 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
238752ccf843Smisaki 	    (buf_offset + sw_offset_bytes),
238852ccf843Smisaki 	    (hdr_size + l2_len),
238952ccf843Smisaki 	    DDI_DMA_SYNC_FORCPU);
239044961713Sgirish 
239144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
239252ccf843Smisaki 	    "==> nxge_receive_packet: after first dump:usage count"));
239344961713Sgirish 
239444961713Sgirish 	if (rx_msg_p->cur_usage_cnt == 0) {
239544961713Sgirish 		if (rx_rbr_p->rbr_use_bcopy) {
239644961713Sgirish 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
239744961713Sgirish 			if (rx_rbr_p->rbr_consumed <
239852ccf843Smisaki 			    rx_rbr_p->rbr_threshold_hi) {
239944961713Sgirish 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
240052ccf843Smisaki 				    ((rx_rbr_p->rbr_consumed >=
240152ccf843Smisaki 				    rx_rbr_p->rbr_threshold_lo) &&
240252ccf843Smisaki 				    (rx_rbr_p->rbr_bufsize_type >=
240352ccf843Smisaki 				    pktbufsz_type))) {
240444961713Sgirish 					rx_msg_p->rx_use_bcopy = B_TRUE;
240544961713Sgirish 				}
240644961713Sgirish 			} else {
240744961713Sgirish 				rx_msg_p->rx_use_bcopy = B_TRUE;
240844961713Sgirish 			}
240944961713Sgirish 		}
241044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
241152ccf843Smisaki 		    "==> nxge_receive_packet: buf %d (new block) ",
241252ccf843Smisaki 		    bsize));
241344961713Sgirish 
241444961713Sgirish 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
241544961713Sgirish 		rx_msg_p->pkt_buf_size = bsize;
241644961713Sgirish 		rx_msg_p->cur_usage_cnt = 1;
241744961713Sgirish 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
241844961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
241952ccf843Smisaki 			    "==> nxge_receive_packet: buf %d "
242052ccf843Smisaki 			    "(single block) ",
242152ccf843Smisaki 			    bsize));
242244961713Sgirish 			/*
242344961713Sgirish 			 * Buffer can be reused once the free function
242444961713Sgirish 			 * is called.
242544961713Sgirish 			 */
242644961713Sgirish 			rx_msg_p->max_usage_cnt = 1;
242744961713Sgirish 			buffer_free = B_TRUE;
242844961713Sgirish 		} else {
242944961713Sgirish 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
243044961713Sgirish 			if (rx_msg_p->max_usage_cnt == 1) {
243144961713Sgirish 				buffer_free = B_TRUE;
243244961713Sgirish 			}
243344961713Sgirish 		}
243444961713Sgirish 	} else {
243544961713Sgirish 		rx_msg_p->cur_usage_cnt++;
243644961713Sgirish 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
243744961713Sgirish 			buffer_free = B_TRUE;
243844961713Sgirish 		}
243944961713Sgirish 	}
244044961713Sgirish 
244144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
244244961713Sgirish 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
244352ccf843Smisaki 	    msg_index, l2_len,
244452ccf843Smisaki 	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
244544961713Sgirish 
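	/*
	 * Account for completion errors.  For most error types the
	 * buffer is reclaimed and the packet dropped; soft FFLP/ZCP
	 * errors and L4 checksum errors are still sent up the stack.
	 */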
24464202ea4bSsbehera 	if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
244744961713Sgirish 		rdc_stats->ierrors++;
244844961713Sgirish 		if (dcf_err) {
244944961713Sgirish 			rdc_stats->dcf_err++;
245044961713Sgirish #ifdef	NXGE_DEBUG
245144961713Sgirish 			if (!rdc_stats->dcf_err) {
245244961713Sgirish 				NXGE_DEBUG_MSG((nxgep, RX_CTL,
245344961713Sgirish 				"nxge_receive_packet: channel %d dcf_err rcr"
245444961713Sgirish 				" 0x%llx", channel, rcr_entry));
245544961713Sgirish 			}
245644961713Sgirish #endif
2457b37cc459SToomas Soome 			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0,
245852ccf843Smisaki 			    NXGE_FM_EREPORT_RDMC_DCF_ERR);
24594202ea4bSsbehera 		} else if (pkt_too_long_err) {
24604202ea4bSsbehera 			rdc_stats->pkt_too_long_err++;
24614202ea4bSsbehera 			NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
24624202ea4bSsbehera 			    " channel %d packet length [%d] > "
24634202ea4bSsbehera 			    "maxframesize [%d]", channel, l2_len + ETHERFCSL,
24644202ea4bSsbehera 			    nxgep->mac.maxframesize));
246544961713Sgirish 		} else {
246644961713Sgirish 				/* Update error stats */
246744961713Sgirish 			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
246844961713Sgirish 			rdc_stats->errlog.compl_err_type = error_type;
246944961713Sgirish 
247044961713Sgirish 			switch (error_type) {
2471f6485eecSyc 			/*
2472f6485eecSyc 			 * Do not send FMA ereport for RCR_L2_ERROR and
2473f6485eecSyc 			 * RCR_L4_CSUM_ERROR because most likely they indicate
2474f6485eecSyc 			 * back pressure rather than HW failures.
2475f6485eecSyc 			 */
247653f3d8ecSyc 			case RCR_L2_ERROR:
247753f3d8ecSyc 				rdc_stats->l2_err++;
247853f3d8ecSyc 				if (rdc_stats->l2_err <
247953f3d8ecSyc 				    error_disp_cnt) {
248044961713Sgirish 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
248153f3d8ecSyc 					    " nxge_receive_packet:"
248253f3d8ecSyc 					    " channel %d RCR L2_ERROR",
248353f3d8ecSyc 					    channel));
248453f3d8ecSyc 				}
248553f3d8ecSyc 				break;
248653f3d8ecSyc 			case RCR_L4_CSUM_ERROR:
248753f3d8ecSyc 				error_send_up = B_TRUE;
248853f3d8ecSyc 				rdc_stats->l4_cksum_err++;
248953f3d8ecSyc 				if (rdc_stats->l4_cksum_err <
249053f3d8ecSyc 				    error_disp_cnt) {
249153f3d8ecSyc 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
249253f3d8ecSyc 					    " nxge_receive_packet:"
249353f3d8ecSyc 					    " channel %d"
249453f3d8ecSyc 					    " RCR L4_CSUM_ERROR", channel));
249553f3d8ecSyc 				}
249653f3d8ecSyc 				break;
2497f6485eecSyc 			/*
2498f6485eecSyc 			 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2499f6485eecSyc 			 * RCR_ZCP_SOFT_ERROR because they reflect the same
2500f6485eecSyc 			 * FFLP and ZCP errors that have been reported by
2501f6485eecSyc 			 * nxge_fflp.c and nxge_zcp.c.
2502f6485eecSyc 			 */
250353f3d8ecSyc 			case RCR_FFLP_SOFT_ERROR:
250453f3d8ecSyc 				error_send_up = B_TRUE;
250553f3d8ecSyc 				rdc_stats->fflp_soft_err++;
250653f3d8ecSyc 				if (rdc_stats->fflp_soft_err <
250753f3d8ecSyc 				    error_disp_cnt) {
250853f3d8ecSyc 					NXGE_ERROR_MSG((nxgep,
250953f3d8ecSyc 					    NXGE_ERR_CTL,
251053f3d8ecSyc 					    " nxge_receive_packet:"
251153f3d8ecSyc 					    " channel %d"
251253f3d8ecSyc 					    " RCR FFLP_SOFT_ERROR", channel));
251353f3d8ecSyc 				}
251453f3d8ecSyc 				break;
251553f3d8ecSyc 			case RCR_ZCP_SOFT_ERROR:
251653f3d8ecSyc 				error_send_up = B_TRUE;
251753f3d8ecSyc 				rdc_stats->zcp_soft_err++;
251853f3d8ecSyc 				if (rdc_stats->zcp_soft_err <
251953f3d8ecSyc 				    error_disp_cnt)
252053f3d8ecSyc 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
252153f3d8ecSyc 					    " nxge_receive_packet: Channel %d"
252253f3d8ecSyc 					    " RCR ZCP_SOFT_ERROR", channel));
252353f3d8ecSyc 				break;
252453f3d8ecSyc 			default:
252553f3d8ecSyc 				rdc_stats->rcr_unknown_err++;
252653f3d8ecSyc 				if (rdc_stats->rcr_unknown_err
252753f3d8ecSyc 				    < error_disp_cnt) {
252853f3d8ecSyc 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
252953f3d8ecSyc 					    " nxge_receive_packet: Channel %d"
253053f3d8ecSyc 					    " RCR entry 0x%llx error 0x%x",
253153f3d8ecSyc 					    channel, rcr_entry, error_type));
253253f3d8ecSyc 				}
253353f3d8ecSyc 				break;
253444961713Sgirish 			}
253544961713Sgirish 		}
253644961713Sgirish 
253744961713Sgirish 		/*
253844961713Sgirish 		 * Update and repost buffer block if max usage
253944961713Sgirish 		 * count is reached.
254044961713Sgirish 		 */
254144961713Sgirish 		if (error_send_up == B_FALSE) {
2542958cea9eSml 			atomic_inc_32(&rx_msg_p->ref_cnt);
254344961713Sgirish 			if (buffer_free == B_TRUE) {
254444961713Sgirish 				rx_msg_p->free = B_TRUE;
254544961713Sgirish 			}
254644961713Sgirish 
254744961713Sgirish 			MUTEX_EXIT(&rx_rbr_p->lock);
254844961713Sgirish 			nxge_freeb(rx_msg_p);
254944961713Sgirish 			return;
255044961713Sgirish 		}
255144961713Sgirish 	}
255244961713Sgirish 
255344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
255452ccf843Smisaki 	    "==> nxge_receive_packet: DMA sync second "));
255544961713Sgirish 
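	/*
	 * bytes_read tracks how much of the frame has already been
	 * picked up from previous RCR entries (multi-buffer packets);
	 * skip_len is the header area to strip from the first buffer.
	 */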
255653f3d8ecSyc 	bytes_read = rcr_p->rcvd_pkt_bytes;
255744961713Sgirish 	skip_len = sw_offset_bytes + hdr_size;
255844961713Sgirish 	if (!rx_msg_p->rx_use_bcopy) {
2559958cea9eSml 		/*
2560958cea9eSml 		 * For loaned-up buffers, the driver reference count is
2561958cea9eSml 		 * incremented first and then the free state is set.
2562958cea9eSml 		 */
256353f3d8ecSyc 		if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
256414ea4bb7Ssd 			if (first_entry) {
256514ea4bb7Ssd 				nmp->b_rptr = &nmp->b_rptr[skip_len];
256653f3d8ecSyc 				if (l2_len < bsize - skip_len) {
256714ea4bb7Ssd 					nmp->b_wptr = &nmp->b_rptr[l2_len];
256853f3d8ecSyc 				} else {
256953f3d8ecSyc 					nmp->b_wptr = &nmp->b_rptr[bsize
257053f3d8ecSyc 					    - skip_len];
257153f3d8ecSyc 				}
257214ea4bb7Ssd 			} else {
257353f3d8ecSyc 				if (l2_len - bytes_read < bsize) {
257414ea4bb7Ssd 					nmp->b_wptr =
257514ea4bb7Ssd 					    &nmp->b_rptr[l2_len - bytes_read];
257653f3d8ecSyc 				} else {
257753f3d8ecSyc 					nmp->b_wptr = &nmp->b_rptr[bsize];
257853f3d8ecSyc 				}
257914ea4bb7Ssd 			}
258044961713Sgirish 		}
258153f3d8ecSyc 	} else {
258253f3d8ecSyc 		if (first_entry) {
258353f3d8ecSyc 			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
258453f3d8ecSyc 			    l2_len < bsize - skip_len ?
258553f3d8ecSyc 			    l2_len : bsize - skip_len);
258653f3d8ecSyc 		} else {
258753f3d8ecSyc 			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
258853f3d8ecSyc 			    l2_len - bytes_read < bsize ?
258953f3d8ecSyc 			    l2_len - bytes_read : bsize);
259053f3d8ecSyc 		}
259153f3d8ecSyc 	}
259253f3d8ecSyc 	if (nmp != NULL) {
2593f720bc57Syc 		if (first_entry) {
2594f720bc57Syc 			/*
2595f720bc57Syc 			 * Jumbo packets may be received with more than one
2596f720bc57Syc 			 * buffer, increment ipackets for the first entry only.
2597f720bc57Syc 			 */
2598f720bc57Syc 			rdc_stats->ipackets++;
2599f720bc57Syc 
2600f720bc57Syc 			/* Update ibytes for kstat. */
2601f720bc57Syc 			rdc_stats->ibytes += skip_len
2602f720bc57Syc 			    + l2_len < bsize ? l2_len : bsize;
2603f720bc57Syc 			/*
2604f720bc57Syc 			 * Update the number of bytes read so far for the
2605f720bc57Syc 			 * current frame.
2606f720bc57Syc 			 */
260753f3d8ecSyc 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
2608f720bc57Syc 		} else {
2609f720bc57Syc 			rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2610f720bc57Syc 			    l2_len - bytes_read : bsize;
261153f3d8ecSyc 			bytes_read += nmp->b_wptr - nmp->b_rptr;
2612f720bc57Syc 		}
261353f3d8ecSyc 
261453f3d8ecSyc 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
261553f3d8ecSyc 		    "==> nxge_receive_packet after dupb: "
261653f3d8ecSyc 		    "rbr consumed %d "
261753f3d8ecSyc 		    "pktbufsz_type %d "
261853f3d8ecSyc 		    "nmp $%p rptr $%p wptr $%p "
261953f3d8ecSyc 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
262053f3d8ecSyc 		    rx_rbr_p->rbr_consumed,
262153f3d8ecSyc 		    pktbufsz_type,
262253f3d8ecSyc 		    nmp, nmp->b_rptr, nmp->b_wptr,
262353f3d8ecSyc 		    buf_offset, bsize, l2_len, skip_len));
262444961713Sgirish 	} else {
262544961713Sgirish 		cmn_err(CE_WARN, "!nxge_receive_packet: "
262652ccf843Smisaki 		    "update stats (error)");
26272e59129aSraghus 		atomic_inc_32(&rx_msg_p->ref_cnt);
26282e59129aSraghus 		if (buffer_free == B_TRUE) {
26292e59129aSraghus 			rx_msg_p->free = B_TRUE;
26302e59129aSraghus 		}
26312e59129aSraghus 		MUTEX_EXIT(&rx_rbr_p->lock);
26322e59129aSraghus 		nxge_freeb(rx_msg_p);
26332e59129aSraghus 		return;
263444961713Sgirish 	}
2635ee5416c9Syc 
263644961713Sgirish 	if (buffer_free == B_TRUE) {
263744961713Sgirish 		rx_msg_p->free = B_TRUE;
263844961713Sgirish 	}
2639f720bc57Syc 
264044961713Sgirish 	is_valid = (nmp != NULL);
264153f3d8ecSyc 
264253f3d8ecSyc 	rcr_p->rcvd_pkt_bytes = bytes_read;
264353f3d8ecSyc 
264444961713Sgirish 	MUTEX_EXIT(&rx_rbr_p->lock);
264544961713Sgirish 
264644961713Sgirish 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
264744961713Sgirish 		atomic_inc_32(&rx_msg_p->ref_cnt);
264844961713Sgirish 		nxge_freeb(rx_msg_p);
264944961713Sgirish 	}
265044961713Sgirish 
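	/*
	 * Hand the new mblk back to the caller: the first RCR entry of
	 * a packet becomes *mp, continuation entries are returned via
	 * *mp_cont.
	 */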
265144961713Sgirish 	if (is_valid) {
2652a3c5bd6dSspeer 		nmp->b_cont = NULL;
265344961713Sgirish 		if (first_entry) {
265444961713Sgirish 			*mp = nmp;
265544961713Sgirish 			*mp_cont = NULL;
265653f3d8ecSyc 		} else {
265744961713Sgirish 			*mp_cont = nmp;
265853f3d8ecSyc 		}
265944961713Sgirish 	}
266044961713Sgirish 
266144961713Sgirish 	/*
2662f720bc57Syc 	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2663f720bc57Syc 	 * If a packet is not fragmented and no error bit is set, then
2664f720bc57Syc 	 * L4 checksum is OK.
266544961713Sgirish 	 */
2666f720bc57Syc 
266744961713Sgirish 	if (is_valid && !multi) {
2668678453a8Sspeer 		/*
2669b4d05839Sml 		 * If the checksum flag nxge_cksum_offload
2670b4d05839Sml 		 * is 1, both TCP and UDP packets can be sent
2671678453a8Sspeer 		 * up with a good checksum. If the checksum flag
2672b4d05839Sml 		 * is set to 0, checksum reporting applies to
2673678453a8Sspeer 		 * TCP packets only (workaround for a hardware bug).
2674b4d05839Sml 		 * If the checksum flag nxge_cksum_offload is
2675b4d05839Sml 		 * greater than 1, neither TCP nor UDP packets
2676b4d05839Sml 		 * have their hardware checksum results reported.
2677678453a8Sspeer 		 */
2678b4d05839Sml 		if (nxge_cksum_offload == 1) {
2679678453a8Sspeer 			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
268052ccf843Smisaki 			    pkt_type == RCR_PKT_IS_UDP) ?
268152ccf843Smisaki 			    B_TRUE: B_FALSE);
2682b4d05839Sml 		} else if (!nxge_cksum_offload) {
2683678453a8Sspeer 			/* TCP checksum only. */
2684678453a8Sspeer 			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
268552ccf843Smisaki 			    B_TRUE: B_FALSE);
2686678453a8Sspeer 		}
268744961713Sgirish 
268844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
268952ccf843Smisaki 		    "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
269052ccf843Smisaki 		    is_valid, multi, is_tcp_udp, frag, error_type));
269144961713Sgirish 
269244961713Sgirish 		if (is_tcp_udp && !frag && !error_type) {
26930dc2366fSVenugopal Iyer 			mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
269444961713Sgirish 			NXGE_DEBUG_MSG((nxgep, RX_CTL,
269552ccf843Smisaki 			    "==> nxge_receive_packet: Full tcp/udp cksum "
269652ccf843Smisaki 			    "is_valid 0x%x multi 0x%llx pkt %d frag %d "
269752ccf843Smisaki 			    "error %d",
269852ccf843Smisaki 			    is_valid, multi, is_tcp_udp, frag, error_type));
269944961713Sgirish 		}
270044961713Sgirish 	}
270144961713Sgirish 
270244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
270352ccf843Smisaki 	    "==> nxge_receive_packet: *mp 0x%016llx", *mp));
270444961713Sgirish 
270544961713Sgirish 	*multi_p = (multi == RCR_MULTI_MASK);
270644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
270752ccf843Smisaki 	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
270852ccf843Smisaki 	    *multi_p, nmp, *mp, *mp_cont));
270944961713Sgirish }
271044961713Sgirish 
2711da14cebeSEric Cheng /*
2712da14cebeSEric Cheng  * Enable polling for a ring. Interrupt for the ring is disabled when
2713da14cebeSEric Cheng  * the nxge interrupt comes (see nxge_rx_intr).
2714da14cebeSEric Cheng  */
2715da14cebeSEric Cheng int
2716da14cebeSEric Cheng nxge_enable_poll(void *arg)
2717da14cebeSEric Cheng {
2718da14cebeSEric Cheng 	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
2719da14cebeSEric Cheng 	p_rx_rcr_ring_t		ringp;
2720da14cebeSEric Cheng 	p_nxge_t		nxgep;
2721da14cebeSEric Cheng 	p_nxge_ldg_t		ldgp;
2722da14cebeSEric Cheng 	uint32_t		channel;
2723da14cebeSEric Cheng 
2724da14cebeSEric Cheng 	if (ring_handle == NULL) {
272563f531d1SSriharsha Basavapatna 		ASSERT(ring_handle != NULL);
2726da14cebeSEric Cheng 		return (0);
2727da14cebeSEric Cheng 	}
2728da14cebeSEric Cheng 
2729da14cebeSEric Cheng 	nxgep = ring_handle->nxgep;
2730da14cebeSEric Cheng 	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2731da14cebeSEric Cheng 	ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2732da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2733da14cebeSEric Cheng 	    "==> nxge_enable_poll: rdc %d ", ringp->rdc));
2734da14cebeSEric Cheng 	ldgp = ringp->ldgp;
2735da14cebeSEric Cheng 	if (ldgp == NULL) {
2736da14cebeSEric Cheng 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2737da14cebeSEric Cheng 		    "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
2738da14cebeSEric Cheng 		    ringp->rdc));
2739da14cebeSEric Cheng 		return (0);
2740da14cebeSEric Cheng 	}
2741da14cebeSEric Cheng 
2742da14cebeSEric Cheng 	MUTEX_ENTER(&ringp->lock);
2743da14cebeSEric Cheng 	/* enable polling */
2744da14cebeSEric Cheng 	if (ringp->poll_flag == 0) {
2745da14cebeSEric Cheng 		ringp->poll_flag = 1;
2746da14cebeSEric Cheng 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2747da14cebeSEric Cheng 		    "==> nxge_enable_poll: rdc %d set poll flag to 1",
2748da14cebeSEric Cheng 		    ringp->rdc));
2749da14cebeSEric Cheng 	}
2750da14cebeSEric Cheng 
2751da14cebeSEric Cheng 	MUTEX_EXIT(&ringp->lock);
2752da14cebeSEric Cheng 	return (0);
2753da14cebeSEric Cheng }
2754da14cebeSEric Cheng /*
2755da14cebeSEric Cheng  * Disable polling for a ring and enable its interrupt.
2756da14cebeSEric Cheng  */
2757da14cebeSEric Cheng int
2758da14cebeSEric Cheng nxge_disable_poll(void *arg)
2759da14cebeSEric Cheng {
2760da14cebeSEric Cheng 	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
2761da14cebeSEric Cheng 	p_rx_rcr_ring_t		ringp;
2762da14cebeSEric Cheng 	p_nxge_t		nxgep;
2763da14cebeSEric Cheng 	uint32_t		channel;
2764da14cebeSEric Cheng 
2765da14cebeSEric Cheng 	if (ring_handle == NULL) {
276663f531d1SSriharsha Basavapatna 		ASSERT(ring_handle != NULL);
2767da14cebeSEric Cheng 		return (0);
2768da14cebeSEric Cheng 	}
2769da14cebeSEric Cheng 
2770da14cebeSEric Cheng 	nxgep = ring_handle->nxgep;
2771da14cebeSEric Cheng 	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2772da14cebeSEric Cheng 	ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2773da14cebeSEric Cheng 
2774da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2775da14cebeSEric Cheng 	    "==> nxge_disable_poll: rdc %d poll_flag %d",
	    ringp->rdc, ringp->poll_flag));
2776da14cebeSEric Cheng 
2777da14cebeSEric Cheng 	MUTEX_ENTER(&ringp->lock);
2778da14cebeSEric Cheng 
2779da14cebeSEric Cheng 	/* disable polling: enable interrupt */
2780da14cebeSEric Cheng 	if (ringp->poll_flag) {
2781da14cebeSEric Cheng 		npi_handle_t		handle;
2782da14cebeSEric Cheng 		rx_dma_ctl_stat_t	cs;
2783da14cebeSEric Cheng 		uint8_t			channel;
2784da14cebeSEric Cheng 		p_nxge_ldg_t		ldgp;
2785da14cebeSEric Cheng 
2786da14cebeSEric Cheng 		/*
2787da14cebeSEric Cheng 		 * Get the control and status for this channel.
2788da14cebeSEric Cheng 		 */
2789da14cebeSEric Cheng 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
2790da14cebeSEric Cheng 		channel = ringp->rdc;
2791da14cebeSEric Cheng 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2792da14cebeSEric Cheng 		    channel, &cs.value);
2793da14cebeSEric Cheng 
2794da14cebeSEric Cheng 		/*
2795da14cebeSEric Cheng 		 * Enable mailbox update
2796da14cebeSEric Cheng 		 * Since packets were not read and the hardware uses
2797da14cebeSEric Cheng 		 * bits pktread and ptrread to update the queue
2798da14cebeSEric Cheng 		 * length, we need to set both bits to 0.
2799da14cebeSEric Cheng 		 */
2800da14cebeSEric Cheng 		cs.bits.ldw.pktread = 0;
2801da14cebeSEric Cheng 		cs.bits.ldw.ptrread = 0;
2802da14cebeSEric Cheng 		cs.bits.hdw.mex = 1;
2803da14cebeSEric Cheng 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2804da14cebeSEric Cheng 		    cs.value);
2805da14cebeSEric Cheng 
2806da14cebeSEric Cheng 		/*
2807da14cebeSEric Cheng 		 * Rearm this logical group if this is a single device
2808da14cebeSEric Cheng 		 * group.
2809da14cebeSEric Cheng 		 */
2810da14cebeSEric Cheng 		ldgp = ringp->ldgp;
2811da14cebeSEric Cheng 		if (ldgp == NULL) {
2812da14cebeSEric Cheng 			ringp->poll_flag = 0;
2813da14cebeSEric Cheng 			MUTEX_EXIT(&ringp->lock);
2814da14cebeSEric Cheng 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2815da14cebeSEric Cheng 			    "==> nxge_disable_poll: no ldgp rdc %d "
2816da14cebeSEric Cheng 			    "(still set poll to 0)", ringp->rdc));
2817da14cebeSEric Cheng 			return (0);
2818da14cebeSEric Cheng 		}
2819da14cebeSEric Cheng 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2820da14cebeSEric Cheng 		    "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
2821da14cebeSEric Cheng 		    ringp->rdc, ldgp));
2822da14cebeSEric Cheng 		if (ldgp->nldvs == 1) {
282363f531d1SSriharsha Basavapatna 			if (isLDOMguest(nxgep)) {
282463f531d1SSriharsha Basavapatna 				ldgp->arm = B_TRUE;
282563f531d1SSriharsha Basavapatna 				nxge_hio_ldgimgn(nxgep, ldgp);
282663f531d1SSriharsha Basavapatna 			} else {
282763f531d1SSriharsha Basavapatna 				ldgimgm_t	mgm;
282863f531d1SSriharsha Basavapatna 				mgm.value = 0;
282963f531d1SSriharsha Basavapatna 				mgm.bits.ldw.arm = 1;
283063f531d1SSriharsha Basavapatna 				mgm.bits.ldw.timer = ldgp->ldg_timer;
283163f531d1SSriharsha Basavapatna 				NXGE_REG_WR64(handle,
283263f531d1SSriharsha Basavapatna 				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
283363f531d1SSriharsha Basavapatna 				    mgm.value);
283463f531d1SSriharsha Basavapatna 			}
2835da14cebeSEric Cheng 		}
2836da14cebeSEric Cheng 		ringp->poll_flag = 0;
2837da14cebeSEric Cheng 	}
2838da14cebeSEric Cheng 
2839da14cebeSEric Cheng 	MUTEX_EXIT(&ringp->lock);
2840da14cebeSEric Cheng 	return (0);
2841da14cebeSEric Cheng }
2842da14cebeSEric Cheng 
2843da14cebeSEric Cheng /*
2844da14cebeSEric Cheng  * Poll up to 'bytes_to_pickup' bytes of packets from the rx ring.
2845da14cebeSEric Cheng  */
2846da14cebeSEric Cheng mblk_t *
2847da14cebeSEric Cheng nxge_rx_poll(void *arg, int bytes_to_pickup)
2848da14cebeSEric Cheng {
2849da14cebeSEric Cheng 	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
2850da14cebeSEric Cheng 	p_rx_rcr_ring_t		rcr_p;
2851da14cebeSEric Cheng 	p_nxge_t		nxgep;
2852da14cebeSEric Cheng 	npi_handle_t		handle;
2853da14cebeSEric Cheng 	rx_dma_ctl_stat_t	cs;
2854da14cebeSEric Cheng 	mblk_t			*mblk;
2855da14cebeSEric Cheng 	p_nxge_ldv_t		ldvp;
2856da14cebeSEric Cheng 	uint32_t		channel;
2857da14cebeSEric Cheng 
2858da14cebeSEric Cheng 	nxgep = ring_handle->nxgep;
2859da14cebeSEric Cheng 
2860da14cebeSEric Cheng 	/*
2861da14cebeSEric Cheng 	 * Get the control and status for this channel.
2862da14cebeSEric Cheng 	 */
2863da14cebeSEric Cheng 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2864da14cebeSEric Cheng 	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2865da14cebeSEric Cheng 	rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
2866da14cebeSEric Cheng 	MUTEX_ENTER(&rcr_p->lock);
2867da14cebeSEric Cheng 	ASSERT(rcr_p->poll_flag == 1);
2868da14cebeSEric Cheng 
2869da14cebeSEric Cheng 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);
2870da14cebeSEric Cheng 
2871da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2872da14cebeSEric Cheng 	    "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
2873da14cebeSEric Cheng 	    rcr_p->rdc, rcr_p->poll_flag));
2874da14cebeSEric Cheng 	mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);
2875da14cebeSEric Cheng 
2876da14cebeSEric Cheng 	ldvp = rcr_p->ldvp;
2877da14cebeSEric Cheng 	/* error events. */
2878da14cebeSEric Cheng 	if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
2879da14cebeSEric Cheng 		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
2880da14cebeSEric Cheng 	}
2881da14cebeSEric Cheng 
2882da14cebeSEric Cheng 	MUTEX_EXIT(&rcr_p->lock);
2883da14cebeSEric Cheng 
2884da14cebeSEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2885da14cebeSEric Cheng 	    "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
2886da14cebeSEric Cheng 	return (mblk);
2887da14cebeSEric Cheng }
2888da14cebeSEric Cheng 
2889da14cebeSEric Cheng 
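/*
 * nxge_rx_err_evnts
 *
 *	Decode the RX DMA control/status error bits for a channel,
 *	update the per-channel statistics, post FMA ereports where
 *	appropriate, and attempt channel or port level recovery for
 *	fatal errors (except in an LDOMs guest, where recovery is
 *	left to the service domain).
 */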
289044961713Sgirish /*ARGSUSED*/
289144961713Sgirish static nxge_status_t
2892678453a8Sspeer nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
289344961713Sgirish {
289444961713Sgirish 	p_nxge_rx_ring_stats_t	rdc_stats;
289544961713Sgirish 	npi_handle_t		handle;
289644961713Sgirish 	npi_status_t		rs;
289744961713Sgirish 	boolean_t		rxchan_fatal = B_FALSE;
289844961713Sgirish 	boolean_t		rxport_fatal = B_FALSE;
289944961713Sgirish 	uint8_t			portn;
290044961713Sgirish 	nxge_status_t		status = NXGE_OK;
290144961713Sgirish 	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
290244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
290344961713Sgirish 
290444961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
290544961713Sgirish 	portn = nxgep->mac.portnum;
2906678453a8Sspeer 	rdc_stats = &nxgep->statsp->rdc_stats[channel];
290744961713Sgirish 
290844961713Sgirish 	if (cs.bits.hdw.rbr_tmout) {
290944961713Sgirish 		rdc_stats->rx_rbr_tmout++;
291044961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
291152ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
291244961713Sgirish 		rxchan_fatal = B_TRUE;
291344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
291452ccf843Smisaki 		    "==> nxge_rx_err_evnts: rx_rbr_timeout"));
291544961713Sgirish 	}
291644961713Sgirish 	if (cs.bits.hdw.rsp_cnt_err) {
291744961713Sgirish 		rdc_stats->rsp_cnt_err++;
291844961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
291952ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
292044961713Sgirish 		rxchan_fatal = B_TRUE;
292144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
292252ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
292352ccf843Smisaki 		    "rsp_cnt_err", channel));
292444961713Sgirish 	}
292544961713Sgirish 	if (cs.bits.hdw.byte_en_bus) {
292644961713Sgirish 		rdc_stats->byte_en_bus++;
292744961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
292852ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
292944961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
293052ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
293152ccf843Smisaki 		    "fatal error: byte_en_bus", channel));
293244961713Sgirish 		rxchan_fatal = B_TRUE;
293344961713Sgirish 	}
293444961713Sgirish 	if (cs.bits.hdw.rsp_dat_err) {
293544961713Sgirish 		rdc_stats->rsp_dat_err++;
293644961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
293752ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
293844961713Sgirish 		rxchan_fatal = B_TRUE;
293944961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
294052ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
294152ccf843Smisaki 		    "fatal error: rsp_dat_err", channel));
294244961713Sgirish 	}
294344961713Sgirish 	if (cs.bits.hdw.rcr_ack_err) {
294444961713Sgirish 		rdc_stats->rcr_ack_err++;
294544961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
294652ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
294744961713Sgirish 		rxchan_fatal = B_TRUE;
294844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
294952ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
295052ccf843Smisaki 		    "fatal error: rcr_ack_err", channel));
295144961713Sgirish 	}
295244961713Sgirish 	if (cs.bits.hdw.dc_fifo_err) {
295344961713Sgirish 		rdc_stats->dc_fifo_err++;
295444961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
295552ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
295644961713Sgirish 		/* This is not a fatal error! */
295744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
295852ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
295952ccf843Smisaki 		    "dc_fifo_err", channel));
296044961713Sgirish 		rxport_fatal = B_TRUE;
296144961713Sgirish 	}
296244961713Sgirish 	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
296344961713Sgirish 		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
296452ccf843Smisaki 		    &rdc_stats->errlog.pre_par,
296552ccf843Smisaki 		    &rdc_stats->errlog.sha_par))
296652ccf843Smisaki 		    != NPI_SUCCESS) {
296744961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
296852ccf843Smisaki 			    "==> nxge_rx_err_evnts(channel %d): "
296952ccf843Smisaki 			    "rcr_sha_par: get perr", channel));
297044961713Sgirish 			return (NXGE_ERROR | rs);
297144961713Sgirish 		}
297244961713Sgirish 		if (cs.bits.hdw.rcr_sha_par) {
297344961713Sgirish 			rdc_stats->rcr_sha_par++;
297444961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
297552ccf843Smisaki 			    NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
297644961713Sgirish 			rxchan_fatal = B_TRUE;
297744961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
297852ccf843Smisaki 			    "==> nxge_rx_err_evnts(channel %d): "
297952ccf843Smisaki 			    "fatal error: rcr_sha_par", channel));
298044961713Sgirish 		}
298144961713Sgirish 		if (cs.bits.hdw.rbr_pre_par) {
298244961713Sgirish 			rdc_stats->rbr_pre_par++;
298344961713Sgirish 			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
298452ccf843Smisaki 			    NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
298544961713Sgirish 			rxchan_fatal = B_TRUE;
298644961713Sgirish 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
298752ccf843Smisaki 			    "==> nxge_rx_err_evnts(channel %d): "
298852ccf843Smisaki 			    "fatal error: rbr_pre_par", channel));
298944961713Sgirish 		}
299044961713Sgirish 	}
299163e23a19Syc 	/*
299263e23a19Syc 	 * The following 4 status bits are for information; the system
299363e23a19Syc 	 * is running fine. There is no need to send FMA ereports or
299463e23a19Syc 	 * log messages.
299563e23a19Syc 	 */
299644961713Sgirish 	if (cs.bits.hdw.port_drop_pkt) {
299744961713Sgirish 		rdc_stats->port_drop_pkt++;
299844961713Sgirish 	}
299944961713Sgirish 	if (cs.bits.hdw.wred_drop) {
300044961713Sgirish 		rdc_stats->wred_drop++;
300144961713Sgirish 	}
300244961713Sgirish 	if (cs.bits.hdw.rbr_pre_empty) {
300344961713Sgirish 		rdc_stats->rbr_pre_empty++;
300444961713Sgirish 	}
300544961713Sgirish 	if (cs.bits.hdw.rcr_shadow_full) {
300644961713Sgirish 		rdc_stats->rcr_shadow_full++;
300744961713Sgirish 	}
300844961713Sgirish 	if (cs.bits.hdw.config_err) {
300944961713Sgirish 		rdc_stats->config_err++;
301044961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
301152ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
301244961713Sgirish 		rxchan_fatal = B_TRUE;
301344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
301452ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
301552ccf843Smisaki 		    "config error", channel));
301644961713Sgirish 	}
301744961713Sgirish 	if (cs.bits.hdw.rcrincon) {
301844961713Sgirish 		rdc_stats->rcrincon++;
301944961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
302052ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_RCRINCON);
302144961713Sgirish 		rxchan_fatal = B_TRUE;
302244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
302352ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
302452ccf843Smisaki 		    "fatal error: rcrincon error", channel));
302544961713Sgirish 	}
302644961713Sgirish 	if (cs.bits.hdw.rcrfull) {
302744961713Sgirish 		rdc_stats->rcrfull++;
302844961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
302952ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_RCRFULL);
303044961713Sgirish 		rxchan_fatal = B_TRUE;
30314df3b64dSToomas Soome 		if (rdc_stats->rcrfull < error_disp_cnt) {
30324df3b64dSToomas Soome 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30334df3b64dSToomas Soome 			    "==> nxge_rx_err_evnts(channel %d): "
30344df3b64dSToomas Soome 			    "fatal error: rcrfull error", channel));
30354df3b64dSToomas Soome 		}
303644961713Sgirish 	}
303744961713Sgirish 	if (cs.bits.hdw.rbr_empty) {
303863e23a19Syc 		/*
303963e23a19Syc 		 * This bit is for information; there is no need to
304063e23a19Syc 		 * send an FMA ereport or log a message.
304163e23a19Syc 		 */
304244961713Sgirish 		rdc_stats->rbr_empty++;
304344961713Sgirish 	}
304444961713Sgirish 	if (cs.bits.hdw.rbrfull) {
304544961713Sgirish 		rdc_stats->rbrfull++;
304644961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
304752ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_RBRFULL);
304844961713Sgirish 		rxchan_fatal = B_TRUE;
304944961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
305052ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
305152ccf843Smisaki 		    "fatal error: rbr_full error", channel));
305244961713Sgirish 	}
305344961713Sgirish 	if (cs.bits.hdw.rbrlogpage) {
305444961713Sgirish 		rdc_stats->rbrlogpage++;
305544961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
305652ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
305744961713Sgirish 		rxchan_fatal = B_TRUE;
305844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
305952ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
306052ccf843Smisaki 		    "fatal error: rbr logical page error", channel));
306144961713Sgirish 	}
306244961713Sgirish 	if (cs.bits.hdw.cfiglogpage) {
306344961713Sgirish 		rdc_stats->cfiglogpage++;
306444961713Sgirish 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
306552ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
306644961713Sgirish 		rxchan_fatal = B_TRUE;
306744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
306852ccf843Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
306952ccf843Smisaki 		    "fatal error: cfig logical page error", channel));
307044961713Sgirish 	}
307144961713Sgirish 
307244961713Sgirish 	if (rxport_fatal)  {
307344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3074678453a8Sspeer 		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
3075678453a8Sspeer 		    portn));
3076678453a8Sspeer 		if (isLDOMguest(nxgep)) {
3077678453a8Sspeer 			status = NXGE_ERROR;
3078678453a8Sspeer 		} else {
3079678453a8Sspeer 			status = nxge_ipp_fatal_err_recover(nxgep);
3080678453a8Sspeer 			if (status == NXGE_OK) {
3081678453a8Sspeer 				FM_SERVICE_RESTORED(nxgep);
3082678453a8Sspeer 			}
308344961713Sgirish 		}
308444961713Sgirish 	}
308544961713Sgirish 
308644961713Sgirish 	if (rxchan_fatal) {
308744961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3088678453a8Sspeer 		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
3089678453a8Sspeer 		    channel));
3090678453a8Sspeer 		if (isLDOMguest(nxgep)) {
3091678453a8Sspeer 			status = NXGE_ERROR;
3092678453a8Sspeer 		} else {
3093678453a8Sspeer 			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
3094678453a8Sspeer 			if (status == NXGE_OK) {
3095678453a8Sspeer 				FM_SERVICE_RESTORED(nxgep);
3096678453a8Sspeer 			}
309744961713Sgirish 		}
309844961713Sgirish 	}
309944961713Sgirish 
310044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
310144961713Sgirish 
310244961713Sgirish 	return (status);
310344961713Sgirish }
310444961713Sgirish 
3105678453a8Sspeer /*
3106678453a8Sspeer  * nxge_rdc_hvio_setup
3107678453a8Sspeer  *
3108678453a8Sspeer  *	This code appears to set up some Hypervisor variables.
3109678453a8Sspeer  *
3110678453a8Sspeer  * Arguments:
3111*86ef0a63SRichard Lowe  *	nxgep
3112*86ef0a63SRichard Lowe  *	channel
3113678453a8Sspeer  *
3114678453a8Sspeer  * Notes:
3115678453a8Sspeer  *	What does NIU_LP_WORKAROUND mean?
3116678453a8Sspeer  *
3117678453a8Sspeer  * NPI/NXGE function calls:
3118678453a8Sspeer  *	na
3119678453a8Sspeer  *
3120678453a8Sspeer  * Context:
3121678453a8Sspeer  *	Any domain
3122678453a8Sspeer  */
3123678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3124678453a8Sspeer static void
3125678453a8Sspeer nxge_rdc_hvio_setup(
3126678453a8Sspeer 	nxge_t *nxgep, int channel)
312744961713Sgirish {
3128678453a8Sspeer 	nxge_dma_common_t	*dma_common;
3129678453a8Sspeer 	nxge_dma_common_t	*dma_control;
3130678453a8Sspeer 	rx_rbr_ring_t		*ring;
3131678453a8Sspeer 
3132678453a8Sspeer 	ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3133678453a8Sspeer 	dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3134678453a8Sspeer 
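	/*
	 * Save the original I/O addresses and sizes of the data and
	 * control areas; hv_set is cleared here and presumably set
	 * later, once these ranges have been registered with the
	 * hypervisor.
	 */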
3135678453a8Sspeer 	ring->hv_set = B_FALSE;
3136678453a8Sspeer 
3137678453a8Sspeer 	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
3138678453a8Sspeer 	    dma_common->orig_ioaddr_pp;
3139678453a8Sspeer 	ring->hv_rx_buf_ioaddr_size = (uint64_t)
3140678453a8Sspeer 	    dma_common->orig_alength;
3141678453a8Sspeer 
3142678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3143678453a8Sspeer 	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
3144678453a8Sspeer 	    channel, ring->hv_rx_buf_base_ioaddr_pp,
3145678453a8Sspeer 	    dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
3146678453a8Sspeer 	    dma_common->orig_alength, dma_common->orig_alength));
3147678453a8Sspeer 
3148678453a8Sspeer 	dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3149678453a8Sspeer 
3150678453a8Sspeer 	ring->hv_rx_cntl_base_ioaddr_pp =
3151678453a8Sspeer 	    (uint64_t)dma_control->orig_ioaddr_pp;
3152678453a8Sspeer 	ring->hv_rx_cntl_ioaddr_size =
3153678453a8Sspeer 	    (uint64_t)dma_control->orig_alength;
3154678453a8Sspeer 
3155678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3156678453a8Sspeer 	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
3157678453a8Sspeer 	    channel, ring->hv_rx_cntl_base_ioaddr_pp,
3158678453a8Sspeer 	    dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
3159678453a8Sspeer 	    dma_control->orig_alength, dma_control->orig_alength));
3160678453a8Sspeer }
316144961713Sgirish #endif
316244961713Sgirish 
3163678453a8Sspeer /*
3164678453a8Sspeer  * nxge_map_rxdma
3165678453a8Sspeer  *
3166678453a8Sspeer  *	Map an RDC into our kernel space.
3167678453a8Sspeer  *
3168678453a8Sspeer  * Arguments:
3169*86ef0a63SRichard Lowe  *	nxgep
3170*86ef0a63SRichard Lowe  *	channel	The channel to map.
3171678453a8Sspeer  *
3172678453a8Sspeer  * Notes:
3173678453a8Sspeer  *	1. Allocate & initialise a memory pool, if necessary.
3174678453a8Sspeer  *	2. Allocate however many receive buffers are required.
3175678453a8Sspeer  *	3. Setup buffers, descriptors, and mailbox.
3176678453a8Sspeer  *
3177678453a8Sspeer  * NPI/NXGE function calls:
3178678453a8Sspeer  *	nxge_alloc_rx_mem_pool()
3179678453a8Sspeer  *	nxge_alloc_rbb()
3180678453a8Sspeer  *	nxge_map_rxdma_channel()
3181678453a8Sspeer  *
3182678453a8Sspeer  * Registers accessed:
3183678453a8Sspeer  *
3184678453a8Sspeer  * Context:
3185678453a8Sspeer  *	Any domain
3186678453a8Sspeer  */
3187678453a8Sspeer static nxge_status_t
3188678453a8Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel)
3189678453a8Sspeer {
3190678453a8Sspeer 	nxge_dma_common_t	**data;
3191678453a8Sspeer 	nxge_dma_common_t	**control;
3192678453a8Sspeer 	rx_rbr_ring_t		**rbr_ring;
3193678453a8Sspeer 	rx_rcr_ring_t		**rcr_ring;
3194678453a8Sspeer 	rx_mbox_t		**mailbox;
3195678453a8Sspeer 	uint32_t		chunks;
319644961713Sgirish 
3197678453a8Sspeer 	nxge_status_t		status;
319844961713Sgirish 
3199678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
320044961713Sgirish 
3201678453a8Sspeer 	if (!nxgep->rx_buf_pool_p) {
3202678453a8Sspeer 		if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
3203678453a8Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3204678453a8Sspeer 			    "<== nxge_map_rxdma: buf not allocated"));
3205678453a8Sspeer 			return (NXGE_ERROR);
3206678453a8Sspeer 		}
320744961713Sgirish 	}
320844961713Sgirish 
3209678453a8Sspeer 	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3210678453a8Sspeer 		return (NXGE_ERROR);
321114ea4bb7Ssd 
321244961713Sgirish 	/*
3213678453a8Sspeer 	 * Map descriptors from the buffer pools for each dma channel.
321444961713Sgirish 	 */
321544961713Sgirish 
3216678453a8Sspeer 	/*
3217678453a8Sspeer 	 * Set up and prepare buffer blocks, descriptors
3218678453a8Sspeer 	 * and mailbox.
3219678453a8Sspeer 	 */
3220678453a8Sspeer 	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3221678453a8Sspeer 	rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3222678453a8Sspeer 	chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
322344961713Sgirish 
3224678453a8Sspeer 	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3225678453a8Sspeer 	rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
322644961713Sgirish 
3227678453a8Sspeer 	mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
322844961713Sgirish 
3229678453a8Sspeer 	status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3230678453a8Sspeer 	    chunks, control, rcr_ring, mailbox);
3231678453a8Sspeer 	if (status != NXGE_OK) {
3232678453a8Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
323352ccf843Smisaki 		    "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
323452ccf843Smisaki 		    "returned 0x%x",
323552ccf843Smisaki 		    channel, status));
3236678453a8Sspeer 		return (status);
3237678453a8Sspeer 	}
3238678453a8Sspeer 	nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3239678453a8Sspeer 	nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3240678453a8Sspeer 	nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3241678453a8Sspeer 	    &nxgep->statsp->rdc_stats[channel];
324244961713Sgirish 
3243678453a8Sspeer #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
3244678453a8Sspeer 	if (!isLDOMguest(nxgep))
3245678453a8Sspeer 		nxge_rdc_hvio_setup(nxgep, channel);
3246678453a8Sspeer #endif
324744961713Sgirish 
324844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3249678453a8Sspeer 	    "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
325044961713Sgirish 
325144961713Sgirish 	return (status);
325244961713Sgirish }
325344961713Sgirish 
325444961713Sgirish static void
3255678453a8Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
325644961713Sgirish {
3257678453a8Sspeer 	rx_rbr_ring_t	*rbr_ring;
3258678453a8Sspeer 	rx_rcr_ring_t	*rcr_ring;
3259678453a8Sspeer 	rx_mbox_t	*mailbox;
326044961713Sgirish 
3261678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
326244961713Sgirish 
3263678453a8Sspeer 	if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
3264678453a8Sspeer 	    !nxgep->rx_mbox_areas_p)
326544961713Sgirish 		return;
326644961713Sgirish 
3267678453a8Sspeer 	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3268678453a8Sspeer 	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
3269678453a8Sspeer 	mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
327044961713Sgirish 
3271678453a8Sspeer 	if (!rbr_ring || !rcr_ring || !mailbox)
3272678453a8Sspeer 		return;
327344961713Sgirish 
3274678453a8Sspeer 	(void) nxge_unmap_rxdma_channel(
327552ccf843Smisaki 	    nxgep, channel, rbr_ring, rcr_ring, mailbox);
327644961713Sgirish 
3277678453a8Sspeer 	nxge_free_rxb(nxgep, channel);
327844961713Sgirish 
3279678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
328044961713Sgirish }
328144961713Sgirish 
328244961713Sgirish nxge_status_t
328344961713Sgirish nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
328444961713Sgirish     p_nxge_dma_common_t *dma_buf_p,  p_rx_rbr_ring_t *rbr_p,
328544961713Sgirish     uint32_t num_chunks,
328644961713Sgirish     p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
328744961713Sgirish     p_rx_mbox_t *rx_mbox_p)
328844961713Sgirish {
328944961713Sgirish 	int	status = NXGE_OK;
329044961713Sgirish 
329144961713Sgirish 	/*
329244961713Sgirish 	 * Set up and prepare buffer blocks, descriptors
329344961713Sgirish 	 * and mailbox.
329444961713Sgirish 	 */
329544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
329652ccf843Smisaki 	    "==> nxge_map_rxdma_channel (channel %d)", channel));
329744961713Sgirish 	/*
329844961713Sgirish 	 * Receive buffer blocks
329944961713Sgirish 	 */
330044961713Sgirish 	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
330152ccf843Smisaki 	    dma_buf_p, rbr_p, num_chunks);
330244961713Sgirish 	if (status != NXGE_OK) {
330344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
330452ccf843Smisaki 		    "==> nxge_map_rxdma_channel (channel %d): "
330552ccf843Smisaki 		    "map buffer failed 0x%x", channel, status));
330644961713Sgirish 		goto nxge_map_rxdma_channel_exit;
330744961713Sgirish 	}
330844961713Sgirish 
330944961713Sgirish 	/*
331044961713Sgirish 	 * Receive block ring, completion ring and mailbox.
331144961713Sgirish 	 */
331244961713Sgirish 	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
331352ccf843Smisaki 	    dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
331444961713Sgirish 	if (status != NXGE_OK) {
331544961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
331652ccf843Smisaki 		    "==> nxge_map_rxdma_channel (channel %d): "
331752ccf843Smisaki 		    "map config failed 0x%x", channel, status));
331844961713Sgirish 		goto nxge_map_rxdma_channel_fail2;
331944961713Sgirish 	}
332044961713Sgirish 
332144961713Sgirish 	goto nxge_map_rxdma_channel_exit;
332244961713Sgirish 
332344961713Sgirish nxge_map_rxdma_channel_fail3:
332444961713Sgirish 	/* Free rbr, rcr */
332544961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
332652ccf843Smisaki 	    "==> nxge_map_rxdma_channel: free rbr/rcr "
332752ccf843Smisaki 	    "(status 0x%x channel %d)",
332852ccf843Smisaki 	    status, channel));
332944961713Sgirish 	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
333052ccf843Smisaki 	    *rcr_p, *rx_mbox_p);
333144961713Sgirish 
333244961713Sgirish nxge_map_rxdma_channel_fail2:
333344961713Sgirish 	/* Free buffer blocks */
333444961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
333552ccf843Smisaki 	    "==> nxge_map_rxdma_channel: free rx buffers"
333652ccf843Smisaki 	    "(nxgep 0x%x status 0x%x channel %d)",
333752ccf843Smisaki 	    nxgep, status, channel));
333844961713Sgirish 	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
333944961713Sgirish 
334056d930aeSspeer 	status = NXGE_ERROR;
334156d930aeSspeer 
334244961713Sgirish nxge_map_rxdma_channel_exit:
334344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
334452ccf843Smisaki 	    "<== nxge_map_rxdma_channel: "
334552ccf843Smisaki 	    "(nxgep 0x%x status 0x%x channel %d)",
334652ccf843Smisaki 	    nxgep, status, channel));
334744961713Sgirish 
334844961713Sgirish 	return (status);
334944961713Sgirish }
335044961713Sgirish 
335144961713Sgirish /*ARGSUSED*/
335244961713Sgirish static void
335344961713Sgirish nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
335444961713Sgirish     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
335544961713Sgirish {
335644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
335752ccf843Smisaki 	    "==> nxge_unmap_rxdma_channel (channel %d)", channel));
335844961713Sgirish 
335944961713Sgirish 	/*
336044961713Sgirish 	 * unmap receive block ring, completion ring and mailbox.
336144961713Sgirish 	 */
336244961713Sgirish 	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
336352ccf843Smisaki 	    rcr_p, rx_mbox_p);
336444961713Sgirish 
336544961713Sgirish 	/* unmap buffer blocks */
336644961713Sgirish 	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
336744961713Sgirish 
336844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
336944961713Sgirish }
337044961713Sgirish 
337144961713Sgirish /*ARGSUSED*/
337244961713Sgirish static nxge_status_t
337344961713Sgirish nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
337444961713Sgirish     p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
337544961713Sgirish     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
337644961713Sgirish {
3377*86ef0a63SRichard Lowe 	p_rx_rbr_ring_t		rbrp;
3378*86ef0a63SRichard Lowe 	p_rx_rcr_ring_t		rcrp;
3379*86ef0a63SRichard Lowe 	p_rx_mbox_t		mboxp;
3380*86ef0a63SRichard Lowe 	p_nxge_dma_common_t	cntl_dmap;
3381*86ef0a63SRichard Lowe 	p_nxge_dma_common_t	dmap;
3382*86ef0a63SRichard Lowe 	p_rx_msg_t		*rx_msg_ring;
3383*86ef0a63SRichard Lowe 	p_rx_msg_t		rx_msg_p;
338444961713Sgirish 	p_rbr_cfig_a_t		rcfga_p;
338544961713Sgirish 	p_rbr_cfig_b_t		rcfgb_p;
338644961713Sgirish 	p_rcrcfig_a_t		cfga_p;
338744961713Sgirish 	p_rcrcfig_b_t		cfgb_p;
338844961713Sgirish 	p_rxdma_cfig1_t		cfig1_p;
338944961713Sgirish 	p_rxdma_cfig2_t		cfig2_p;
339044961713Sgirish 	p_rbr_kick_t		kick_p;
339144961713Sgirish 	uint32_t		dmaaddrp;
339244961713Sgirish 	uint32_t		*rbr_vaddrp;
339344961713Sgirish 	uint32_t		bkaddr;
339444961713Sgirish 	nxge_status_t		status = NXGE_OK;
339544961713Sgirish 	int			i;
3396*86ef0a63SRichard Lowe 	uint32_t		nxge_port_rcr_size;
339744961713Sgirish 
339844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
339952ccf843Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring"));
340044961713Sgirish 
340144961713Sgirish 	cntl_dmap = *dma_cntl_p;
340244961713Sgirish 
340344961713Sgirish 	/* Map in the receive block ring */
340444961713Sgirish 	rbrp = *rbr_p;
340544961713Sgirish 	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
340644961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
340744961713Sgirish 	/*
340844961713Sgirish 	 * Zero out buffer block ring descriptors.
340944961713Sgirish 	 */
341044961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
341144961713Sgirish 
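	/*
	 * Build the cached RBR register images: cfga holds the ring's
	 * DMA base address and length, cfgb holds the three packet
	 * buffer size codes and the block size, and the kick register
	 * posts the initial buffer count.
	 */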
341244961713Sgirish 	rcfga_p = &(rbrp->rbr_cfga);
341344961713Sgirish 	rcfgb_p = &(rbrp->rbr_cfgb);
341444961713Sgirish 	kick_p = &(rbrp->rbr_kick);
341544961713Sgirish 	rcfga_p->value = 0;
341644961713Sgirish 	rcfgb_p->value = 0;
341744961713Sgirish 	kick_p->value = 0;
341844961713Sgirish 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
341944961713Sgirish 	rcfga_p->value = (rbrp->rbr_addr &
342052ccf843Smisaki 	    (RBR_CFIG_A_STDADDR_MASK |
342152ccf843Smisaki 	    RBR_CFIG_A_STDADDR_BASE_MASK));
342244961713Sgirish 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
342344961713Sgirish 
342444961713Sgirish 	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
342544961713Sgirish 	rcfgb_p->bits.ldw.vld0 = 1;
342644961713Sgirish 	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
342744961713Sgirish 	rcfgb_p->bits.ldw.vld1 = 1;
342844961713Sgirish 	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
342944961713Sgirish 	rcfgb_p->bits.ldw.vld2 = 1;
343044961713Sgirish 	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
343144961713Sgirish 
343244961713Sgirish 	/*
343344961713Sgirish 	 * For each buffer block, enter receive block address to the ring.
343444961713Sgirish 	 */
343544961713Sgirish 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
343644961713Sgirish 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
343744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
343852ccf843Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
343952ccf843Smisaki 	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
344044961713Sgirish 
344144961713Sgirish 	rx_msg_ring = rbrp->rx_msg_ring;
344244961713Sgirish 	for (i = 0; i < rbrp->tnblocks; i++) {
344344961713Sgirish 		rx_msg_p = rx_msg_ring[i];
344444961713Sgirish 		rx_msg_p->nxgep = nxgep;
344544961713Sgirish 		rx_msg_p->rx_rbr_p = rbrp;
344644961713Sgirish 		bkaddr = (uint32_t)
344752ccf843Smisaki 		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
344852ccf843Smisaki 		    >> RBR_BKADDR_SHIFT));
344944961713Sgirish 		rx_msg_p->free = B_FALSE;
345044961713Sgirish 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
345144961713Sgirish 
345244961713Sgirish 		*rbr_vaddrp++ = bkaddr;
345344961713Sgirish 	}
345444961713Sgirish 
345544961713Sgirish 	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
345644961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
345744961713Sgirish 
345844961713Sgirish 	rbrp->rbr_rd_index = 0;
345944961713Sgirish 
346044961713Sgirish 	rbrp->rbr_consumed = 0;
346144961713Sgirish 	rbrp->rbr_use_bcopy = B_TRUE;
346244961713Sgirish 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
346344961713Sgirish 	/*
346444961713Sgirish 	 * Do bcopy on packets greater than bcopy size once
346544961713Sgirish 	 * the lo threshold is reached.
346644961713Sgirish 	 * This lo threshold should be less than the hi threshold.
346744961713Sgirish 	 *
346844961713Sgirish 	 * Do bcopy on every packet once the hi threshold is reached.
346944961713Sgirish 	 */
347044961713Sgirish 	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
347144961713Sgirish 		/* default it to use hi */
347244961713Sgirish 		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
347344961713Sgirish 	}
347444961713Sgirish 
347544961713Sgirish 	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
347644961713Sgirish 		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
347744961713Sgirish 	}
347844961713Sgirish 	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
347944961713Sgirish 
348044961713Sgirish 	switch (nxge_rx_threshold_hi) {
348144961713Sgirish 	default:
348244961713Sgirish 	case	NXGE_RX_COPY_NONE:
348344961713Sgirish 		/* Do not do bcopy at all */
348444961713Sgirish 		rbrp->rbr_use_bcopy = B_FALSE;
348544961713Sgirish 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
348644961713Sgirish 		break;
348744961713Sgirish 
348844961713Sgirish 	case NXGE_RX_COPY_1:
348944961713Sgirish 	case NXGE_RX_COPY_2:
349044961713Sgirish 	case NXGE_RX_COPY_3:
349144961713Sgirish 	case NXGE_RX_COPY_4:
349244961713Sgirish 	case NXGE_RX_COPY_5:
349344961713Sgirish 	case NXGE_RX_COPY_6:
349444961713Sgirish 	case NXGE_RX_COPY_7:
349544961713Sgirish 		rbrp->rbr_threshold_hi =
349652ccf843Smisaki 		    rbrp->rbb_max *
349752ccf843Smisaki 		    (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
349844961713Sgirish 		break;
349944961713Sgirish 
350044961713Sgirish 	case NXGE_RX_COPY_ALL:
350144961713Sgirish 		rbrp->rbr_threshold_hi = 0;
350244961713Sgirish 		break;
350344961713Sgirish 	}
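	/*
	 * Both thresholds are expressed as a fraction
	 * (nxge_rx_threshold_* / NXGE_RX_BCOPY_SCALE) of the total
	 * number of RBR buffers, rbb_max.
	 */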
350444961713Sgirish 
350544961713Sgirish 	switch (nxge_rx_threshold_lo) {
350644961713Sgirish 	default:
350744961713Sgirish 	case	NXGE_RX_COPY_NONE:
350844961713Sgirish 		/* Do not do bcopy at all */
350944961713Sgirish 		if (rbrp->rbr_use_bcopy) {
351044961713Sgirish 			rbrp->rbr_use_bcopy = B_FALSE;
351144961713Sgirish 		}
351244961713Sgirish 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
351344961713Sgirish 		break;
351444961713Sgirish 
351544961713Sgirish 	case NXGE_RX_COPY_1:
351644961713Sgirish 	case NXGE_RX_COPY_2:
351744961713Sgirish 	case NXGE_RX_COPY_3:
351844961713Sgirish 	case NXGE_RX_COPY_4:
351944961713Sgirish 	case NXGE_RX_COPY_5:
352044961713Sgirish 	case NXGE_RX_COPY_6:
352144961713Sgirish 	case NXGE_RX_COPY_7:
352244961713Sgirish 		rbrp->rbr_threshold_lo =
352352ccf843Smisaki 		    rbrp->rbb_max *
352452ccf843Smisaki 		    (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
352544961713Sgirish 		break;
352644961713Sgirish 
352744961713Sgirish 	case NXGE_RX_COPY_ALL:
352844961713Sgirish 		rbrp->rbr_threshold_lo = 0;
352944961713Sgirish 		break;
353044961713Sgirish 	}
353144961713Sgirish 
353244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
353352ccf843Smisaki 	    "nxge_map_rxdma_channel_cfg_ring: channel %d "
353452ccf843Smisaki 	    "rbb_max %d "
353552ccf843Smisaki 	    "rbrp->rbr_bufsize_type %d "
353652ccf843Smisaki 	    "rbb_threshold_hi %d "
353752ccf843Smisaki 	    "rbb_threshold_lo %d",
353852ccf843Smisaki 	    dma_channel,
353952ccf843Smisaki 	    rbrp->rbb_max,
354052ccf843Smisaki 	    rbrp->rbr_bufsize_type,
354152ccf843Smisaki 	    rbrp->rbr_threshold_hi,
354252ccf843Smisaki 	    rbrp->rbr_threshold_lo));
354344961713Sgirish 
354444961713Sgirish 	rbrp->page_valid.value = 0;
354544961713Sgirish 	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
354644961713Sgirish 	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
354744961713Sgirish 	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
354844961713Sgirish 	rbrp->page_hdl.value = 0;
354944961713Sgirish 
355044961713Sgirish 	rbrp->page_valid.bits.ldw.page0 = 1;
355144961713Sgirish 	rbrp->page_valid.bits.ldw.page1 = 1;
355244961713Sgirish 
355344961713Sgirish 	/* Map in the receive completion ring */
355444961713Sgirish 	rcrp = (p_rx_rcr_ring_t)
355552ccf843Smisaki 	    KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
355644961713Sgirish 	rcrp->rdc = dma_channel;
355744961713Sgirish 
355844961713Sgirish 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
355944961713Sgirish 	rcrp->comp_size = nxge_port_rcr_size;
356044961713Sgirish 	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
356144961713Sgirish 
356244961713Sgirish 	rcrp->max_receive_pkts = nxge_max_rx_pkts;
356344961713Sgirish 
356444961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
356544961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
356652ccf843Smisaki 	    sizeof (rcr_entry_t));
356744961713Sgirish 	rcrp->comp_rd_index = 0;
356844961713Sgirish 	rcrp->comp_wt_index = 0;
356944961713Sgirish 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
357052ccf843Smisaki 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
357152ccf843Smisaki 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
357252ccf843Smisaki 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
357344961713Sgirish 
357444961713Sgirish 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
357552ccf843Smisaki 	    (nxge_port_rcr_size - 1);
357644961713Sgirish 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
357752ccf843Smisaki 	    (nxge_port_rcr_size - 1);
357844961713Sgirish 
357944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
358052ccf843Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring: "
358152ccf843Smisaki 	    "channel %d "
358252ccf843Smisaki 	    "rbr_vaddrp $%p "
358352ccf843Smisaki 	    "rcr_desc_rd_head_p $%p "
358452ccf843Smisaki 	    "rcr_desc_rd_head_pp $%p "
358552ccf843Smisaki 	    "rcr_desc_rd_last_p $%p "
358652ccf843Smisaki 	    "rcr_desc_rd_last_pp $%p ",
358752ccf843Smisaki 	    dma_channel,
358852ccf843Smisaki 	    rbr_vaddrp,
358952ccf843Smisaki 	    rcrp->rcr_desc_rd_head_p,
359052ccf843Smisaki 	    rcrp->rcr_desc_rd_head_pp,
359152ccf843Smisaki 	    rcrp->rcr_desc_last_p,
359252ccf843Smisaki 	    rcrp->rcr_desc_last_pp));
359344961713Sgirish 
359444961713Sgirish 	/*
359544961713Sgirish 	 * Zero out buffer block ring descriptors.
359644961713Sgirish 	 */
359744961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
35987b26d9ffSSantwona Behera 
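	/*
	 * Clamp the configured interrupt timeout and packet threshold
	 * to the minimums the RDC supports.
	 */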
35997b26d9ffSSantwona Behera 	rcrp->intr_timeout = (nxgep->intr_timeout <
36007b26d9ffSSantwona Behera 	    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
36017b26d9ffSSantwona Behera 	    nxgep->intr_timeout;
36027b26d9ffSSantwona Behera 
36037b26d9ffSSantwona Behera 	rcrp->intr_threshold = (nxgep->intr_threshold <
36047b26d9ffSSantwona Behera 	    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
36057b26d9ffSSantwona Behera 	    nxgep->intr_threshold;
36067b26d9ffSSantwona Behera 
360744961713Sgirish 	rcrp->full_hdr_flag = B_FALSE;
36084df55fdeSJanie Lu 
36094df55fdeSJanie Lu 	rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset;
36104df55fdeSJanie Lu 
361144961713Sgirish 
361244961713Sgirish 	cfga_p = &(rcrp->rcr_cfga);
361344961713Sgirish 	cfgb_p = &(rcrp->rcr_cfgb);
361444961713Sgirish 	cfga_p->value = 0;
361544961713Sgirish 	cfgb_p->value = 0;
361644961713Sgirish 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
361744961713Sgirish 	cfga_p->value = (rcrp->rcr_addr &
361852ccf843Smisaki 	    (RCRCFIG_A_STADDR_MASK |
361952ccf843Smisaki 	    RCRCFIG_A_STADDR_BASE_MASK));
362044961713Sgirish 
362144961713Sgirish 	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
362252ccf843Smisaki 	    RCRCFIG_A_LEN_SHIF);
362344961713Sgirish 
362444961713Sgirish 	/*
362544961713Sgirish 	 * The timeout should be set based on the system clock divider.
36267b26d9ffSSantwona Behera 	 * A timeout value of 1 corresponds to one timer granularity unit
362744961713Sgirish 	 * (1000 clock cycles), i.e. about 3 microseconds at 300MHz.
362844961713Sgirish 	 */
362914ea4bb7Ssd 	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
363014ea4bb7Ssd 	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
363144961713Sgirish 	cfgb_p->bits.ldw.entout = 1;
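	/*
	 * RCR interrupt pacing: pthres is the packet-count threshold and
	 * timeout is the timer value programmed into the RCR configuration
	 * B register; entout presumably enables the timeout/threshold
	 * based interrupt.  Both values were clamped above to at least
	 * NXGE_RDC_RCR_THRESHOLD_MIN and NXGE_RDC_RCR_TIMEOUT_MIN.
	 */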
363244961713Sgirish 
363344961713Sgirish 	/* Map in the mailbox */
363444961713Sgirish 	mboxp = (p_rx_mbox_t)
363552ccf843Smisaki 	    KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
363644961713Sgirish 	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
363744961713Sgirish 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
363844961713Sgirish 	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
363944961713Sgirish 	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
364044961713Sgirish 	cfig1_p->value = cfig2_p->value = 0;
364144961713Sgirish 
364244961713Sgirish 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
364344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
364452ccf843Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring: "
364552ccf843Smisaki 	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
364652ccf843Smisaki 	    dma_channel, cfig1_p->value, cfig2_p->value,
364752ccf843Smisaki 	    mboxp->mbox_addr));
364844961713Sgirish 
364944961713Sgirish 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
365052ccf843Smisaki 	    & 0xfff);
365144961713Sgirish 	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
365244961713Sgirish 
365344961713Sgirish 
365544961713Sgirish 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
365652ccf843Smisaki 	    RXDMA_CFIG2_MBADDR_L_MASK);
365744961713Sgirish 
365844961713Sgirish 	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
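	/*
	 * The mailbox DMA address is split across the two configuration
	 * registers: cfig1.mbaddr_h takes the bits above 32 (the >> 32
	 * followed by the 0xfff mask above), while cfig2.mbaddr takes the
	 * low word masked by RXDMA_CFIG2_MBADDR_L_MASK and right-shifted
	 * by RXDMA_CFIG2_MBADDR_L_SHIFT; the hardware presumably requires
	 * the mailbox to be aligned to that shift amount.
	 */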
365944961713Sgirish 
366044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
366152ccf843Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring: "
366252ccf843Smisaki 	    "channel %d dmaaddrp $%p "
366352ccf843Smisaki 	    "cfg1 0x%016llx cfig2 0x%016llx",
366452ccf843Smisaki 	    dma_channel, dmaaddrp,
366552ccf843Smisaki 	    cfig1_p->value, cfig2_p->value));
366644961713Sgirish 
366744961713Sgirish 	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
36684df55fdeSJanie Lu 	if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
36694df55fdeSJanie Lu 		switch (rcrp->sw_priv_hdr_len) {
36704df55fdeSJanie Lu 			case SW_OFFSET_NO_OFFSET:
36714df55fdeSJanie Lu 			case SW_OFFSET_64:
36724df55fdeSJanie Lu 			case SW_OFFSET_128:
36734df55fdeSJanie Lu 			case SW_OFFSET_192:
36744df55fdeSJanie Lu 				cfig2_p->bits.ldw.offset =
36754df55fdeSJanie Lu 				    rcrp->sw_priv_hdr_len;
36764df55fdeSJanie Lu 				cfig2_p->bits.ldw.offset256 = 0;
36774df55fdeSJanie Lu 				break;
36784df55fdeSJanie Lu 			case SW_OFFSET_256:
36794df55fdeSJanie Lu 			case SW_OFFSET_320:
36804df55fdeSJanie Lu 			case SW_OFFSET_384:
36814df55fdeSJanie Lu 			case SW_OFFSET_448:
36824df55fdeSJanie Lu 				cfig2_p->bits.ldw.offset =
36834df55fdeSJanie Lu 				    rcrp->sw_priv_hdr_len & 0x3;
36844df55fdeSJanie Lu 				cfig2_p->bits.ldw.offset256 = 1;
36854df55fdeSJanie Lu 				break;
36864df55fdeSJanie Lu 			default:
36874df55fdeSJanie Lu 				cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET;
36884df55fdeSJanie Lu 				cfig2_p->bits.ldw.offset256 = 0;
36894df55fdeSJanie Lu 			}
36904df55fdeSJanie Lu 	} else {
36914df55fdeSJanie Lu 		cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
36924df55fdeSJanie Lu 	}
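	/*
	 * On NIU_HW_TYPE_RF hardware the software private header length is
	 * encoded in two fields: offsets of 0/64/128/192 bytes program the
	 * offset field directly with offset256 cleared, while 256/320/384/448
	 * program the low bits (value & 0x3) with offset256 set as a
	 * high-order bit.  On other hardware only the plain offset field
	 * is programmed.
	 */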
369344961713Sgirish 
369444961713Sgirish 	rbrp->rx_rcr_p = rcrp;
369544961713Sgirish 	rcrp->rx_rbr_p = rbrp;
369644961713Sgirish 	*rcr_p = rcrp;
369744961713Sgirish 	*rx_mbox_p = mboxp;
369844961713Sgirish 
369944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
370052ccf843Smisaki 	    "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
370144961713Sgirish 
370244961713Sgirish 	return (status);
370344961713Sgirish }
370444961713Sgirish 
370544961713Sgirish /*ARGSUSED*/
370644961713Sgirish static void
370744961713Sgirish nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
370844961713Sgirish     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
370944961713Sgirish {
371044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
371152ccf843Smisaki 	    "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
371252ccf843Smisaki 	    rcr_p->rdc));
371344961713Sgirish 
371444961713Sgirish 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
371544961713Sgirish 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
371644961713Sgirish 
371744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
371852ccf843Smisaki 	    "<== nxge_unmap_rxdma_channel_cfg_ring"));
371944961713Sgirish }
372044961713Sgirish 
372144961713Sgirish static nxge_status_t
372244961713Sgirish nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
372344961713Sgirish     p_nxge_dma_common_t *dma_buf_p,
372444961713Sgirish     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
372544961713Sgirish {
3726*86ef0a63SRichard Lowe 	p_rx_rbr_ring_t		rbrp;
3727*86ef0a63SRichard Lowe 	p_nxge_dma_common_t	dma_bufp, tmp_bufp;
3728*86ef0a63SRichard Lowe 	p_rx_msg_t		*rx_msg_ring;
3729*86ef0a63SRichard Lowe 	p_rx_msg_t		rx_msg_p;
3730*86ef0a63SRichard Lowe 	p_mblk_t		mblk_p;
373144961713Sgirish 
373244961713Sgirish 	rxring_info_t *ring_info;
373344961713Sgirish 	nxge_status_t		status = NXGE_OK;
373444961713Sgirish 	int			i, j, index;
373544961713Sgirish 	uint32_t		size, bsize, nblocks, nmsgs;
373644961713Sgirish 
373744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
373852ccf843Smisaki 	    "==> nxge_map_rxdma_channel_buf_ring: channel %d",
373952ccf843Smisaki 	    channel));
374044961713Sgirish 
374144961713Sgirish 	dma_bufp = tmp_bufp = *dma_buf_p;
374244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
374352ccf843Smisaki 	    " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
374452ccf843Smisaki 	    "chunks bufp 0x%016llx",
374552ccf843Smisaki 	    channel, num_chunks, dma_bufp));
374644961713Sgirish 
374744961713Sgirish 	nmsgs = 0;
374844961713Sgirish 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
374944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
375052ccf843Smisaki 		    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
375152ccf843Smisaki 		    "bufp 0x%016llx nblocks %d nmsgs %d",
375252ccf843Smisaki 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
375344961713Sgirish 		nmsgs += tmp_bufp->nblocks;
375444961713Sgirish 	}
375544961713Sgirish 	if (!nmsgs) {
375656d930aeSspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
375752ccf843Smisaki 		    "<== nxge_map_rxdma_channel_buf_ring: channel %d "
375852ccf843Smisaki 		    "no msg blocks",
375952ccf843Smisaki 		    channel));
376044961713Sgirish 		status = NXGE_ERROR;
376144961713Sgirish 		goto nxge_map_rxdma_channel_buf_ring_exit;
376244961713Sgirish 	}
376344961713Sgirish 
3764007969e0Stm 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);
376544961713Sgirish 
376644961713Sgirish 	size = nmsgs * sizeof (p_rx_msg_t);
376744961713Sgirish 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
376844961713Sgirish 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
376952ccf843Smisaki 	    KM_SLEEP);
377044961713Sgirish 
377144961713Sgirish 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
377252ccf843Smisaki 	    (void *)nxgep->interrupt_cookie);
377344961713Sgirish 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
377452ccf843Smisaki 	    (void *)nxgep->interrupt_cookie);
377544961713Sgirish 	rbrp->rdc = channel;
377644961713Sgirish 	rbrp->num_blocks = num_chunks;
377744961713Sgirish 	rbrp->tnblocks = nmsgs;
377844961713Sgirish 	rbrp->rbb_max = nmsgs;
377944961713Sgirish 	rbrp->rbr_max_size = nmsgs;
378044961713Sgirish 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
378144961713Sgirish 
378244961713Sgirish 	/*
378344961713Sgirish 	 * Buffer sizes suggested by NIU architect.
378444961713Sgirish 	 * 256, 1K and 2K (4K or 8K when jumbo frames are enabled).
378544961713Sgirish 	 */
378644961713Sgirish 
378744961713Sgirish 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
378844961713Sgirish 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
378944961713Sgirish 	rbrp->npi_pkt_buf_size0 = SIZE_256B;
379044961713Sgirish 
379144961713Sgirish 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
379244961713Sgirish 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
379344961713Sgirish 	rbrp->npi_pkt_buf_size1 = SIZE_1KB;
379444961713Sgirish 
379544961713Sgirish 	rbrp->block_size = nxgep->rx_default_block_size;
379644961713Sgirish 
379748056c53SMichael Speer 	if (!nxgep->mac.is_jumbo) {
379844961713Sgirish 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
379944961713Sgirish 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
380044961713Sgirish 		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
380144961713Sgirish 	} else {
380244961713Sgirish 		if (rbrp->block_size >= 0x2000) {
380344961713Sgirish 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
380444961713Sgirish 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
380544961713Sgirish 			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
380644961713Sgirish 		} else {
380744961713Sgirish 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
380844961713Sgirish 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
380944961713Sgirish 			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
381044961713Sgirish 		}
381144961713Sgirish 	}
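	/*
	 * Three RBR buffer sizes are programmed per channel: 256 bytes,
	 * 1K, and a large size that is 2K for standard frames or 4K/8K
	 * for jumbo frames, chosen from the default receive block size.
	 */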
381244961713Sgirish 
381344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
381452ccf843Smisaki 	    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
381552ccf843Smisaki 	    "actual rbr max %d rbb_max %d nmsgs %d "
381652ccf843Smisaki 	    "rbrp->block_size %d default_block_size %d "
381752ccf843Smisaki 	    "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
381852ccf843Smisaki 	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
381952ccf843Smisaki 	    rbrp->block_size, nxgep->rx_default_block_size,
382052ccf843Smisaki 	    nxge_rbr_size, nxge_rbr_spare_size));
382144961713Sgirish 
3822*86ef0a63SRichard Lowe 	/* Map in buffers from the buffer pool.	 */
382344961713Sgirish 	index = 0;
382444961713Sgirish 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
382544961713Sgirish 		bsize = dma_bufp->block_size;
382644961713Sgirish 		nblocks = dma_bufp->nblocks;
382744961713Sgirish 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
382844961713Sgirish 		ring_info->buffer[i].buf_index = i;
382944961713Sgirish 		ring_info->buffer[i].buf_size = dma_bufp->alength;
383044961713Sgirish 		ring_info->buffer[i].start_index = index;
383144961713Sgirish 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
383244961713Sgirish 
383344961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
383452ccf843Smisaki 		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
383552ccf843Smisaki 		    "chunk %d"
383652ccf843Smisaki 		    " nblocks %d chunk_size %x block_size 0x%x "
383752ccf843Smisaki 		    "dma_bufp $%p", channel, i,
383852ccf843Smisaki 		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
383952ccf843Smisaki 		    dma_bufp));
384044961713Sgirish 
384144961713Sgirish 		for (j = 0; j < nblocks; j++) {
384244961713Sgirish 			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
384352ccf843Smisaki 			    dma_bufp)) == NULL) {
384456d930aeSspeer 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
384552ccf843Smisaki 				    "allocb failed (index %d i %d j %d)",
384652ccf843Smisaki 				    index, i, j));
384756d930aeSspeer 				goto nxge_map_rxdma_channel_buf_ring_fail1;
384844961713Sgirish 			}
384944961713Sgirish 			rx_msg_ring[index] = rx_msg_p;
385044961713Sgirish 			rx_msg_p->block_index = index;
385144961713Sgirish 			rx_msg_p->shifted_addr = (uint32_t)
385252ccf843Smisaki 			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
385352ccf843Smisaki 			    RBR_BKADDR_SHIFT));
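			/*
			 * The buffer's DMA address is kept pre-shifted by
			 * RBR_BKADDR_SHIFT, which is presumably the form in
			 * which block addresses are later posted to the RBR.
			 */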
385444961713Sgirish 
385544961713Sgirish 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
385652ccf843Smisaki 			    "index %d j %d rx_msg_p $%p mblk %p",
385752ccf843Smisaki 			    index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
385844961713Sgirish 
385944961713Sgirish 			mblk_p = rx_msg_p->rx_mblk_p;
386044961713Sgirish 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3861007969e0Stm 
3862007969e0Stm 			rbrp->rbr_ref_cnt++;
386344961713Sgirish 			index++;
386444961713Sgirish 			rx_msg_p->buf_dma.dma_channel = channel;
386544961713Sgirish 		}
3866678453a8Sspeer 
3867678453a8Sspeer 		rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
3868678453a8Sspeer 		if (dma_bufp->contig_alloc_type) {
3869678453a8Sspeer 			rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
3870678453a8Sspeer 		}
3871678453a8Sspeer 
3872678453a8Sspeer 		if (dma_bufp->kmem_alloc_type) {
3873678453a8Sspeer 			rbrp->rbr_alloc_type = KMEM_ALLOC;
3874678453a8Sspeer 		}
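		/*
		 * Record how this chunk's memory was obtained (DDI DMA
		 * memory, contiguous memory, or kmem) so that the teardown
		 * path (nxge_rxdma_databuf_free) can release it through
		 * the matching free routine.
		 */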
3875678453a8Sspeer 
3876678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3877678453a8Sspeer 		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
3878678453a8Sspeer 		    "chunk %d"
3879678453a8Sspeer 		    " nblocks %d chunk_size %x block_size 0x%x "
3880678453a8Sspeer 		    "dma_bufp $%p",
3881678453a8Sspeer 		    channel, i,
3882678453a8Sspeer 		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3883678453a8Sspeer 		    dma_bufp));
388444961713Sgirish 	}
388544961713Sgirish 	if (i < rbrp->num_blocks) {
388644961713Sgirish 		goto nxge_map_rxdma_channel_buf_ring_fail1;
388744961713Sgirish 	}
388844961713Sgirish 
388944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
389052ccf843Smisaki 	    "nxge_map_rxdma_channel_buf_ring: done buf init "
389152ccf843Smisaki 	    "channel %d msg block entries %d",
389252ccf843Smisaki 	    channel, index));
389344961713Sgirish 	ring_info->block_size_mask = bsize - 1;
389444961713Sgirish 	rbrp->rx_msg_ring = rx_msg_ring;
389544961713Sgirish 	rbrp->dma_bufp = dma_buf_p;
389644961713Sgirish 	rbrp->ring_info = ring_info;
389744961713Sgirish 
389844961713Sgirish 	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
389944961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
390052ccf843Smisaki 	    " nxge_map_rxdma_channel_buf_ring: "
390152ccf843Smisaki 	    "channel %d done buf info init", channel));
390244961713Sgirish 
3903007969e0Stm 	/*
3904007969e0Stm 	 * Finally, permit nxge_freeb() to call nxge_post_page().
3905007969e0Stm 	 */
3906007969e0Stm 	rbrp->rbr_state = RBR_POSTING;
3907007969e0Stm 
390844961713Sgirish 	*rbr_p = rbrp;
390944961713Sgirish 	goto nxge_map_rxdma_channel_buf_ring_exit;
391044961713Sgirish 
391144961713Sgirish nxge_map_rxdma_channel_buf_ring_fail1:
391244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
391352ccf843Smisaki 	    " nxge_map_rxdma_channel_buf_ring: failed channel %d (status 0x%x)",
391452ccf843Smisaki 	    channel, status));
391544961713Sgirish 
391644961713Sgirish 	index--;
391744961713Sgirish 	for (; index >= 0; index--) {
391844961713Sgirish 		rx_msg_p = rx_msg_ring[index];
391944961713Sgirish 		if (rx_msg_p != NULL) {
392014ea4bb7Ssd 			freeb(rx_msg_p->rx_mblk_p);
392144961713Sgirish 			rx_msg_ring[index] = NULL;
392244961713Sgirish 		}
392344961713Sgirish 	}
392444961713Sgirish nxge_map_rxdma_channel_buf_ring_fail:
392544961713Sgirish 	MUTEX_DESTROY(&rbrp->post_lock);
392644961713Sgirish 	MUTEX_DESTROY(&rbrp->lock);
392744961713Sgirish 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
392844961713Sgirish 	KMEM_FREE(rx_msg_ring, size);
392944961713Sgirish 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
393044961713Sgirish 
393156d930aeSspeer 	status = NXGE_ERROR;
393256d930aeSspeer 
393344961713Sgirish nxge_map_rxdma_channel_buf_ring_exit:
393444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
393552ccf843Smisaki 	    "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
393644961713Sgirish 
393744961713Sgirish 	return (status);
393844961713Sgirish }
393944961713Sgirish 
394044961713Sgirish /*ARGSUSED*/
394144961713Sgirish static void
394244961713Sgirish nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
394344961713Sgirish     p_rx_rbr_ring_t rbr_p)
394444961713Sgirish {
3945*86ef0a63SRichard Lowe 	p_rx_msg_t		*rx_msg_ring;
3946*86ef0a63SRichard Lowe 	p_rx_msg_t		rx_msg_p;
3947*86ef0a63SRichard Lowe 	rxring_info_t		*ring_info;
394844961713Sgirish 	int			i;
394944961713Sgirish 	uint32_t		size;
395044961713Sgirish #ifdef	NXGE_DEBUG
395144961713Sgirish 	int			num_chunks;
395244961713Sgirish #endif
395344961713Sgirish 
395444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
395552ccf843Smisaki 	    "==> nxge_unmap_rxdma_channel_buf_ring"));
395644961713Sgirish 	if (rbr_p == NULL) {
395744961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
395852ccf843Smisaki 		    "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
395944961713Sgirish 		return;
396044961713Sgirish 	}
396144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
396252ccf843Smisaki 	    "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
396352ccf843Smisaki 	    rbr_p->rdc));
396444961713Sgirish 
396544961713Sgirish 	rx_msg_ring = rbr_p->rx_msg_ring;
396644961713Sgirish 	ring_info = rbr_p->ring_info;
396744961713Sgirish 
396844961713Sgirish 	if (rx_msg_ring == NULL || ring_info == NULL) {
396952ccf843Smisaki 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
397052ccf843Smisaki 		    "<== nxge_unmap_rxdma_channel_buf_ring: "
397152ccf843Smisaki 		    "rx_msg_ring $%p ring_info $%p",
397252ccf843Smisaki 		    rx_msg_ring, ring_info));
397344961713Sgirish 		return;
397444961713Sgirish 	}
397544961713Sgirish 
397644961713Sgirish #ifdef	NXGE_DEBUG
397744961713Sgirish 	num_chunks = rbr_p->num_blocks;
397844961713Sgirish #endif
397944961713Sgirish 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
398044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
398152ccf843Smisaki 	    " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
398252ccf843Smisaki 	    "tnblocks %d (max %d) size ptrs %d ",
398352ccf843Smisaki 	    rbr_p->rdc, num_chunks,
398452ccf843Smisaki 	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));
398544961713Sgirish 
398644961713Sgirish 	for (i = 0; i < rbr_p->tnblocks; i++) {
398744961713Sgirish 		rx_msg_p = rx_msg_ring[i];
398844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
398952ccf843Smisaki 		    " nxge_unmap_rxdma_channel_buf_ring: "
399052ccf843Smisaki 		    "rx_msg_p $%p",
399152ccf843Smisaki 		    rx_msg_p));
399244961713Sgirish 		if (rx_msg_p != NULL) {
399314ea4bb7Ssd 			freeb(rx_msg_p->rx_mblk_p);
399444961713Sgirish 			rx_msg_ring[i] = NULL;
399544961713Sgirish 		}
399644961713Sgirish 	}
399744961713Sgirish 
3998007969e0Stm 	/*
3999007969e0Stm 	 * We may no longer use the mutex <post_lock>. By setting
4000007969e0Stm 	 * <rbr_state> to anything but POSTING, we prevent
4001007969e0Stm 	 * nxge_post_page() from accessing a dead mutex.
4002007969e0Stm 	 */
4003007969e0Stm 	rbr_p->rbr_state = RBR_UNMAPPING;
400444961713Sgirish 	MUTEX_DESTROY(&rbr_p->post_lock);
4005007969e0Stm 
400644961713Sgirish 	MUTEX_DESTROY(&rbr_p->lock);
4007007969e0Stm 
4008007969e0Stm 	if (rbr_p->rbr_ref_cnt == 0) {
4009678453a8Sspeer 		/*
4010678453a8Sspeer 		 * This is the normal state of affairs.
4011678453a8Sspeer 		 * Need to free the following buffers:
4012678453a8Sspeer 		 *  - data buffers
4013678453a8Sspeer 		 *  - rx_msg ring
4014678453a8Sspeer 		 *  - ring_info
4015678453a8Sspeer 		 *  - rbr ring
4016678453a8Sspeer 		 */
4017678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
4018678453a8Sspeer 		    "unmap_rxdma_buf_ring: No outstanding - freeing "));
4019678453a8Sspeer 		nxge_rxdma_databuf_free(rbr_p);
4020678453a8Sspeer 		KMEM_FREE(ring_info, sizeof (rxring_info_t));
4021678453a8Sspeer 		KMEM_FREE(rx_msg_ring, size);
4022007969e0Stm 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
4023007969e0Stm 	} else {
4024007969e0Stm 		/*
4025007969e0Stm 		 * Some of our buffers are still being used.
4026007969e0Stm 		 * Therefore, tell nxge_freeb() this ring is
4027007969e0Stm 		 * unmapped, so it may free <rbr_p> for us.
4028007969e0Stm 		 */
4029007969e0Stm 		rbr_p->rbr_state = RBR_UNMAPPED;
4030007969e0Stm 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4031007969e0Stm 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
4032007969e0Stm 		    rbr_p->rbr_ref_cnt,
4033007969e0Stm 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
4034007969e0Stm 	}
403544961713Sgirish 
403644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
403752ccf843Smisaki 	    "<== nxge_unmap_rxdma_channel_buf_ring"));
403844961713Sgirish }
403944961713Sgirish 
4040678453a8Sspeer /*
4041678453a8Sspeer  * nxge_rxdma_hw_start_common
4042678453a8Sspeer  *
4043678453a8Sspeer  * Arguments:
4044*86ef0a63SRichard Lowe  *	nxgep
4045678453a8Sspeer  *
4046678453a8Sspeer  * Notes:
4047678453a8Sspeer  *
4048678453a8Sspeer  * NPI/NXGE function calls:
4049678453a8Sspeer  *	nxge_init_fzc_rx_common();
4050678453a8Sspeer  *	nxge_init_fzc_rxdma_port();
4051678453a8Sspeer  *
4052678453a8Sspeer  * Registers accessed:
4053678453a8Sspeer  *
4054678453a8Sspeer  * Context:
4055678453a8Sspeer  *	Service domain
4056678453a8Sspeer  */
405744961713Sgirish static nxge_status_t
405844961713Sgirish nxge_rxdma_hw_start_common(p_nxge_t nxgep)
405944961713Sgirish {
406044961713Sgirish 	nxge_status_t		status = NXGE_OK;
406144961713Sgirish 
406244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
406344961713Sgirish 
406444961713Sgirish 	/*
406544961713Sgirish 	 * Load the sharable parameters by writing to the
406644961713Sgirish 	 * function zero control registers. These FZC registers
406744961713Sgirish 	 * should be initialized only once for the entire chip.
406844961713Sgirish 	 */
406944961713Sgirish 	(void) nxge_init_fzc_rx_common(nxgep);
407044961713Sgirish 
407144961713Sgirish 	/*
407244961713Sgirish 	 * Initialize the RXDMA port specific FZC control configurations.
407344961713Sgirish 	 * These FZC registers are pertaining to each port.
407444961713Sgirish 	 */
407544961713Sgirish 	(void) nxge_init_fzc_rxdma_port(nxgep);
407644961713Sgirish 
407744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
407844961713Sgirish 
407944961713Sgirish 	return (status);
408044961713Sgirish }
408144961713Sgirish 
408244961713Sgirish static nxge_status_t
4083678453a8Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
408444961713Sgirish {
408544961713Sgirish 	int			i, ndmas;
4086*86ef0a63SRichard Lowe 	p_rx_rbr_rings_t	rx_rbr_rings;
408744961713Sgirish 	p_rx_rbr_ring_t		*rbr_rings;
4088*86ef0a63SRichard Lowe 	p_rx_rcr_rings_t	rx_rcr_rings;
408944961713Sgirish 	p_rx_rcr_ring_t		*rcr_rings;
4090*86ef0a63SRichard Lowe 	p_rx_mbox_areas_t	rx_mbox_areas_p;
409144961713Sgirish 	p_rx_mbox_t		*rx_mbox_p;
409244961713Sgirish 	nxge_status_t		status = NXGE_OK;
409344961713Sgirish 
409444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
409544961713Sgirish 
409644961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
409744961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
409844961713Sgirish 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
409944961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
410052ccf843Smisaki 		    "<== nxge_rxdma_hw_start: NULL ring pointers"));
410144961713Sgirish 		return (NXGE_ERROR);
410244961713Sgirish 	}
410344961713Sgirish 	ndmas = rx_rbr_rings->ndmas;
410444961713Sgirish 	if (ndmas == 0) {
410544961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
410652ccf843Smisaki 		    "<== nxge_rxdma_hw_start: no dma channel allocated"));
410744961713Sgirish 		return (NXGE_ERROR);
410844961713Sgirish 	}
410944961713Sgirish 
411044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
411152ccf843Smisaki 	    "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
411244961713Sgirish 
411344961713Sgirish 	rbr_rings = rx_rbr_rings->rbr_rings;
411444961713Sgirish 	rcr_rings = rx_rcr_rings->rcr_rings;
411544961713Sgirish 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
411644961713Sgirish 	if (rx_mbox_areas_p) {
411744961713Sgirish 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
4118e3d11eeeSToomas Soome 	} else {
4119e3d11eeeSToomas Soome 		rx_mbox_p = NULL;
412044961713Sgirish 	}
412144961713Sgirish 
4122678453a8Sspeer 	i = channel;
4123678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
412452ccf843Smisaki 	    "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
412552ccf843Smisaki 	    ndmas, channel));
4126678453a8Sspeer 	status = nxge_rxdma_start_channel(nxgep, channel,
4127678453a8Sspeer 	    (p_rx_rbr_ring_t)rbr_rings[i],
4128678453a8Sspeer 	    (p_rx_rcr_ring_t)rcr_rings[i],
4129678453a8Sspeer 	    (p_rx_mbox_t)rx_mbox_p[i]);
4130678453a8Sspeer 	if (status != NXGE_OK) {
4131678453a8Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4132678453a8Sspeer 		    "==> nxge_rxdma_hw_start: disable "
4133678453a8Sspeer 		    "(status 0x%x channel %d)", status, channel));
4134678453a8Sspeer 		return (status);
413544961713Sgirish 	}
413644961713Sgirish 
413744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
413852ccf843Smisaki 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
413952ccf843Smisaki 	    rx_rbr_rings, rx_rcr_rings));
414044961713Sgirish 
414144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
414252ccf843Smisaki 	    "<== nxge_rxdma_hw_start: (status 0x%x)", status));
414344961713Sgirish 
414444961713Sgirish 	return (status);
414544961713Sgirish }
414644961713Sgirish 
414744961713Sgirish static void
4148678453a8Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
414944961713Sgirish {
4150*86ef0a63SRichard Lowe 	p_rx_rbr_rings_t	rx_rbr_rings;
4151*86ef0a63SRichard Lowe 	p_rx_rcr_rings_t	rx_rcr_rings;
415244961713Sgirish 
415344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
415444961713Sgirish 
415544961713Sgirish 	rx_rbr_rings = nxgep->rx_rbr_rings;
415644961713Sgirish 	rx_rcr_rings = nxgep->rx_rcr_rings;
415744961713Sgirish 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
415844961713Sgirish 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
415952ccf843Smisaki 		    "<== nxge_rxdma_hw_stop: NULL ring pointers"));
416044961713Sgirish 		return;
416144961713Sgirish 	}
416244961713Sgirish 
416344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
416452ccf843Smisaki 	    "==> nxge_rxdma_hw_stop(channel %d)",
416552ccf843Smisaki 	    channel));
4166678453a8Sspeer 	(void) nxge_rxdma_stop_channel(nxgep, channel);
416744961713Sgirish 
416844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
416952ccf843Smisaki 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
417052ccf843Smisaki 	    rx_rbr_rings, rx_rcr_rings));
417144961713Sgirish 
417244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
417344961713Sgirish }
417444961713Sgirish 
417544961713Sgirish 
417644961713Sgirish static nxge_status_t
417744961713Sgirish nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
417844961713Sgirish     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
417944961713Sgirish {
418044961713Sgirish 	npi_handle_t		handle;
418144961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
418244961713Sgirish 	rx_dma_ctl_stat_t	cs;
418344961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
418444961713Sgirish 	nxge_status_t		status = NXGE_OK;
418544961713Sgirish 
418644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
418744961713Sgirish 
418844961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
418944961713Sgirish 
419044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
4191*86ef0a63SRichard Lowe 	    "npi handle addr $%p acc $%p",
4192*86ef0a63SRichard Lowe 	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));
419344961713Sgirish 
4194678453a8Sspeer 	/* Reset RXDMA channel, but not if you're a guest. */
4195678453a8Sspeer 	if (!isLDOMguest(nxgep)) {
4196678453a8Sspeer 		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4197678453a8Sspeer 		if (rs != NPI_SUCCESS) {
4198678453a8Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4199678453a8Sspeer 			    "==> nxge_init_fzc_rdc: "
4200678453a8Sspeer 			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
4201678453a8Sspeer 			    channel, rs));
4202678453a8Sspeer 			return (NXGE_ERROR | rs);
4203678453a8Sspeer 		}
4204678453a8Sspeer 
4205678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4206678453a8Sspeer 		    "==> nxge_rxdma_start_channel: reset done: channel %d",
4207678453a8Sspeer 		    channel));
420844961713Sgirish 	}
420944961713Sgirish 
4210678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4211678453a8Sspeer 	if (isLDOMguest(nxgep))
4212678453a8Sspeer 		(void) nxge_rdc_lp_conf(nxgep, channel);
4213678453a8Sspeer #endif
421444961713Sgirish 
421544961713Sgirish 	/*
421644961713Sgirish 	 * Initialize the RXDMA channel specific FZC control
421744961713Sgirish 	 * configurations. These FZC registers are pertaining
421844961713Sgirish 	 * to each RX channel (logical pages).
421944961713Sgirish 	 */
4220678453a8Sspeer 	if (!isLDOMguest(nxgep)) {
4221678453a8Sspeer 		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
4222678453a8Sspeer 		if (status != NXGE_OK) {
4223678453a8Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4224*86ef0a63SRichard Lowe 			    "==> nxge_rxdma_start_channel: "
4225*86ef0a63SRichard Lowe 			    "init fzc rxdma failed (0x%08x channel %d)",
4226*86ef0a63SRichard Lowe 			    status, channel));
4227678453a8Sspeer 			return (status);
4228678453a8Sspeer 		}
422944961713Sgirish 
4230678453a8Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4231*86ef0a63SRichard Lowe 		    "==> nxge_rxdma_start_channel: fzc done"));
4232678453a8Sspeer 	}
423344961713Sgirish 
423444961713Sgirish 	/* Set up the interrupt event masks. */
423544961713Sgirish 	ent_mask.value = 0;
423644961713Sgirish 	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
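	/*
	 * Mask the RBR-empty event while the channel is being configured;
	 * the event mask is rewritten again below, after the channel has
	 * been enabled.
	 */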
423744961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4238678453a8Sspeer 	    &ent_mask);
423944961713Sgirish 	if (rs != NPI_SUCCESS) {
424044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4241*86ef0a63SRichard Lowe 		    "==> nxge_rxdma_start_channel: "
4242*86ef0a63SRichard Lowe 		    "init rxdma event masks failed "
4243*86ef0a63SRichard Lowe 		    "(0x%08x channel %d)",
4244*86ef0a63SRichard Lowe 		    rs, channel));
424544961713Sgirish 		return (NXGE_ERROR | rs);
424644961713Sgirish 	}
424744961713Sgirish 
4248678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4249*86ef0a63SRichard Lowe 	    "==> nxge_rxdma_start_channel: "
4250*86ef0a63SRichard Lowe 	    "event done: channel %d (mask 0x%016llx)",
4251*86ef0a63SRichard Lowe 	    channel, ent_mask.value));
425244961713Sgirish 
425344961713Sgirish 	/* Initialize the receive DMA control and status register */
425444961713Sgirish 	cs.value = 0;
425544961713Sgirish 	cs.bits.hdw.mex = 1;
425644961713Sgirish 	cs.bits.hdw.rcrthres = 1;
425744961713Sgirish 	cs.bits.hdw.rcrto = 1;
425844961713Sgirish 	cs.bits.hdw.rbr_empty = 1;
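	/*
	 * Arm the initial control/status state: mex presumably enables
	 * mailbox updates, and writing the rcrthres, rcrto and rbr_empty
	 * bits clears/re-arms those event states before the channel is
	 * enabled.
	 */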
425944961713Sgirish 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
426044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4261*86ef0a63SRichard Lowe 	    "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
426244961713Sgirish 	if (status != NXGE_OK) {
426344961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4264*86ef0a63SRichard Lowe 		    "==> nxge_rxdma_start_channel: "
4265*86ef0a63SRichard Lowe 		    "init rxdma control register failed (0x%08x channel %d)",
4266*86ef0a63SRichard Lowe 		    status, channel));
426744961713Sgirish 		return (status);
426844961713Sgirish 	}
426944961713Sgirish 
427044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4271*86ef0a63SRichard Lowe 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
427244961713Sgirish 
427344961713Sgirish 	/*
427444961713Sgirish 	 * Load RXDMA descriptors, buffers, mailbox,
427544961713Sgirish 	 * initialise the receive DMA channels and
427644961713Sgirish 	 * enable each DMA channel.
427744961713Sgirish 	 */
427844961713Sgirish 	status = nxge_enable_rxdma_channel(nxgep,
4279678453a8Sspeer 	    channel, rbr_p, rcr_p, mbox_p);
428044961713Sgirish 
428144961713Sgirish 	if (status != NXGE_OK) {
428244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4283678453a8Sspeer 		    " nxge_rxdma_start_channel: "
4284678453a8Sspeer 		    " enable rxdma failed (0x%08x channel %d)",
4285678453a8Sspeer 		    status, channel));
428644961713Sgirish 		return (status);
428744961713Sgirish 	}
428844961713Sgirish 
4289678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4290678453a8Sspeer 	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));
4291678453a8Sspeer 
4292678453a8Sspeer 	if (isLDOMguest(nxgep)) {
4293678453a8Sspeer 		/* Add interrupt handler for this channel. */
4294ef523517SMichael Speer 		status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
4295ef523517SMichael Speer 		if (status != NXGE_OK) {
4296678453a8Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4297678453a8Sspeer 			    " nxge_rxdma_start_channel: "
4298678453a8Sspeer 			    " nxge_hio_intr_add failed (0x%08x channel %d)",
4299ef523517SMichael Speer 			    status, channel));
4300ef523517SMichael Speer 			return (status);
4301678453a8Sspeer 		}
4302678453a8Sspeer 	}
4303678453a8Sspeer 
430444961713Sgirish 	ent_mask.value = 0;
430544961713Sgirish 	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4306*86ef0a63SRichard Lowe 	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
430744961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4308*86ef0a63SRichard Lowe 	    &ent_mask);
430944961713Sgirish 	if (rs != NPI_SUCCESS) {
431044961713Sgirish 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4311*86ef0a63SRichard Lowe 		    "==> nxge_rxdma_start_channel: "
4312*86ef0a63SRichard Lowe 		    "init rxdma event masks failed (0x%08x channel %d)",
4313*86ef0a63SRichard Lowe 		    rs, channel));
431444961713Sgirish 		return (NXGE_ERROR | rs);
431544961713Sgirish 	}
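	/*
	 * Final event mask for normal operation: only the WRED drop and
	 * port-drop packet events remain masked, so the other RXDMA
	 * events, including RBR empty, can now raise interrupts (assuming
	 * OP_SET replaces the previous mask value).
	 */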
431644961713Sgirish 
431744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4318*86ef0a63SRichard Lowe 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
431944961713Sgirish 
432044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
432144961713Sgirish 
432244961713Sgirish 	return (NXGE_OK);
432344961713Sgirish }
432444961713Sgirish 
432544961713Sgirish static nxge_status_t
432644961713Sgirish nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
432744961713Sgirish {
432844961713Sgirish 	npi_handle_t		handle;
432944961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
433044961713Sgirish 	rx_dma_ctl_stat_t	cs;
433144961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
433244961713Sgirish 	nxge_status_t		status = NXGE_OK;
433344961713Sgirish 
433444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
433544961713Sgirish 
433644961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
433744961713Sgirish 
433844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
433952ccf843Smisaki 	    "npi handle addr $%p acc $%p",
434052ccf843Smisaki 	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));
434144961713Sgirish 
4342330cd344SMichael Speer 	if (!isLDOMguest(nxgep)) {
4343330cd344SMichael Speer 		/*
4344330cd344SMichael Speer 		 * Stop RxMAC = A.9.2.6
4345330cd344SMichael Speer 		 */
4346330cd344SMichael Speer 		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
4347330cd344SMichael Speer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4348330cd344SMichael Speer 			    "nxge_rxdma_stop_channel: "
4349330cd344SMichael Speer 			    "Failed to disable RxMAC"));
4350330cd344SMichael Speer 		}
4351330cd344SMichael Speer 
4352330cd344SMichael Speer 		/*
4353330cd344SMichael Speer 		 * Drain IPP Port = A.9.3.6
4354330cd344SMichael Speer 		 */
4355330cd344SMichael Speer 		(void) nxge_ipp_drain(nxgep);
4356330cd344SMichael Speer 	}
4357330cd344SMichael Speer 
435844961713Sgirish 	/* Reset RXDMA channel */
435944961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
436044961713Sgirish 	if (rs != NPI_SUCCESS) {
436144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
436252ccf843Smisaki 		    " nxge_rxdma_stop_channel: "
436352ccf843Smisaki 		    " reset rxdma failed (0x%08x channel %d)",
436452ccf843Smisaki 		    rs, channel));
436544961713Sgirish 		return (NXGE_ERROR | rs);
436644961713Sgirish 	}
436744961713Sgirish 
436844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
436952ccf843Smisaki 	    "==> nxge_rxdma_stop_channel: reset done"));
437044961713Sgirish 
437144961713Sgirish 	/* Set up the interrupt event masks. */
437244961713Sgirish 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
437344961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
437452ccf843Smisaki 	    &ent_mask);
437544961713Sgirish 	if (rs != NPI_SUCCESS) {
437644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
437752ccf843Smisaki 		    "==> nxge_rxdma_stop_channel: "
437852ccf843Smisaki 		    "set rxdma event masks failed (0x%08x channel %d)",
437952ccf843Smisaki 		    rs, channel));
438044961713Sgirish 		return (NXGE_ERROR | rs);
438144961713Sgirish 	}
438244961713Sgirish 
438344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
438452ccf843Smisaki 	    "==> nxge_rxdma_stop_channel: event done"));
438544961713Sgirish 
4386330cd344SMichael Speer 	/*
4387330cd344SMichael Speer 	 * Initialize the receive DMA control and status register
4388330cd344SMichael Speer 	 */
438944961713Sgirish 	cs.value = 0;
4390330cd344SMichael Speer 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
439144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
439252ccf843Smisaki 	    " to default (all 0s) 0x%08x", cs.value));
439344961713Sgirish 	if (status != NXGE_OK) {
439444961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
439552ccf843Smisaki 		    " nxge_rxdma_stop_channel: init rxdma"
439652ccf843Smisaki 		    " control register failed (0x%08x channel %d)",
439752ccf843Smisaki 		    status, channel));
439844961713Sgirish 		return (status);
439944961713Sgirish 	}
440044961713Sgirish 
440144961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
440252ccf843Smisaki 	    "==> nxge_rxdma_stop_channel: control done"));
440344961713Sgirish 
4404330cd344SMichael Speer 	/*
4405330cd344SMichael Speer 	 * Make sure channel is disabled.
4406330cd344SMichael Speer 	 */
440744961713Sgirish 	status = nxge_disable_rxdma_channel(nxgep, channel);
4408da14cebeSEric Cheng 
440944961713Sgirish 	if (status != NXGE_OK) {
441044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
441152ccf843Smisaki 		    " nxge_rxdma_stop_channel: "
441252ccf843Smisaki 		    " disable rxdma failed (0x%08x channel %d)",
441352ccf843Smisaki 		    status, channel));
441444961713Sgirish 		return (status);
441544961713Sgirish 	}
441644961713Sgirish 
4417330cd344SMichael Speer 	if (!isLDOMguest(nxgep)) {
4418330cd344SMichael Speer 		/*
4419330cd344SMichael Speer 		 * Enable RxMAC = A.9.2.10
4420330cd344SMichael Speer 		 */
4421330cd344SMichael Speer 		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
4422330cd344SMichael Speer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4423330cd344SMichael Speer 			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
4424330cd344SMichael Speer 		}
4425330cd344SMichael Speer 	}
4426330cd344SMichael Speer 
442744961713Sgirish 	NXGE_DEBUG_MSG((nxgep,
442852ccf843Smisaki 	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
442944961713Sgirish 
443044961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
443144961713Sgirish 
443244961713Sgirish 	return (NXGE_OK);
443344961713Sgirish }
443444961713Sgirish 
443544961713Sgirish nxge_status_t
443644961713Sgirish nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
443744961713Sgirish {
443844961713Sgirish 	npi_handle_t		handle;
443944961713Sgirish 	p_nxge_rdc_sys_stats_t	statsp;
444044961713Sgirish 	rx_ctl_dat_fifo_stat_t	stat;
444144961713Sgirish 	uint32_t		zcp_err_status;
444244961713Sgirish 	uint32_t		ipp_err_status;
444344961713Sgirish 	nxge_status_t		status = NXGE_OK;
444444961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
444544961713Sgirish 	boolean_t		my_err = B_FALSE;
444644961713Sgirish 
444744961713Sgirish 	handle = nxgep->npi_handle;
444844961713Sgirish 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
444944961713Sgirish 
445044961713Sgirish 	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
445144961713Sgirish 
445244961713Sgirish 	if (rs != NPI_SUCCESS)
445344961713Sgirish 		return (NXGE_ERROR | rs);
445444961713Sgirish 
445544961713Sgirish 	if (stat.bits.ldw.id_mismatch) {
445644961713Sgirish 		statsp->id_mismatch++;
4457b37cc459SToomas Soome 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0,
445852ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
445944961713Sgirish 		/* Global fatal error encountered */
446044961713Sgirish 	}
446144961713Sgirish 
446244961713Sgirish 	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
446344961713Sgirish 		switch (nxgep->mac.portnum) {
446444961713Sgirish 		case 0:
446544961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
446652ccf843Smisaki 			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
446744961713Sgirish 				my_err = B_TRUE;
446844961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
446944961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
447044961713Sgirish 			}
447144961713Sgirish 			break;
447244961713Sgirish 		case 1:
447344961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
447452ccf843Smisaki 			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
447544961713Sgirish 				my_err = B_TRUE;
447644961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
447744961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
447844961713Sgirish 			}
447944961713Sgirish 			break;
448044961713Sgirish 		case 2:
448144961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
448252ccf843Smisaki 			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
448344961713Sgirish 				my_err = B_TRUE;
448444961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
448544961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
448644961713Sgirish 			}
448744961713Sgirish 			break;
448844961713Sgirish 		case 3:
448944961713Sgirish 			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
449052ccf843Smisaki 			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
449144961713Sgirish 				my_err = B_TRUE;
449244961713Sgirish 				zcp_err_status = stat.bits.ldw.zcp_eop_err;
449344961713Sgirish 				ipp_err_status = stat.bits.ldw.ipp_eop_err;
449444961713Sgirish 			}
449544961713Sgirish 			break;
449644961713Sgirish 		default:
449744961713Sgirish 			return (NXGE_ERROR);
449844961713Sgirish 		}
449944961713Sgirish 	}
450044961713Sgirish 
450144961713Sgirish 	if (my_err) {
450244961713Sgirish 		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
450352ccf843Smisaki 		    zcp_err_status);
450444961713Sgirish 		if (status != NXGE_OK)
450544961713Sgirish 			return (status);
450644961713Sgirish 	}
450744961713Sgirish 
450844961713Sgirish 	return (NXGE_OK);
450944961713Sgirish }
451044961713Sgirish 
451144961713Sgirish static nxge_status_t
451244961713Sgirish nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4513*86ef0a63SRichard Lowe     uint32_t zcp_status)
451444961713Sgirish {
451544961713Sgirish 	boolean_t		rxport_fatal = B_FALSE;
451644961713Sgirish 	p_nxge_rdc_sys_stats_t	statsp;
451744961713Sgirish 	nxge_status_t		status = NXGE_OK;
451844961713Sgirish 	uint8_t			portn;
451944961713Sgirish 
452044961713Sgirish 	portn = nxgep->mac.portnum;
452144961713Sgirish 	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
452244961713Sgirish 
452344961713Sgirish 	if (ipp_status & (0x1 << portn)) {
452444961713Sgirish 		statsp->ipp_eop_err++;
4525b37cc459SToomas Soome 		NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
452652ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
452744961713Sgirish 		rxport_fatal = B_TRUE;
452844961713Sgirish 	}
452944961713Sgirish 
453044961713Sgirish 	if (zcp_status & (0x1 << portn)) {
453144961713Sgirish 		statsp->zcp_eop_err++;
4532b37cc459SToomas Soome 		NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
453352ccf843Smisaki 		    NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
453444961713Sgirish 		rxport_fatal = B_TRUE;
453544961713Sgirish 	}
453644961713Sgirish 
453744961713Sgirish 	if (rxport_fatal) {
453844961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
453952ccf843Smisaki 		    " nxge_rxdma_handle_port_error: "
454052ccf843Smisaki 		    " fatal error on Port #%d\n",
454152ccf843Smisaki 		    portn));
454244961713Sgirish 		status = nxge_rx_port_fatal_err_recover(nxgep);
454344961713Sgirish 		if (status == NXGE_OK) {
454444961713Sgirish 			FM_SERVICE_RESTORED(nxgep);
454544961713Sgirish 		}
454644961713Sgirish 	}
454744961713Sgirish 
454844961713Sgirish 	return (status);
454944961713Sgirish }
455044961713Sgirish 
455144961713Sgirish static nxge_status_t
455244961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
455344961713Sgirish {
455444961713Sgirish 	npi_handle_t		handle;
455544961713Sgirish 	npi_status_t		rs = NPI_SUCCESS;
455644961713Sgirish 	nxge_status_t		status = NXGE_OK;
455744961713Sgirish 	p_rx_rbr_ring_t		rbrp;
455844961713Sgirish 	p_rx_rcr_ring_t		rcrp;
455944961713Sgirish 	p_rx_mbox_t		mboxp;
456044961713Sgirish 	rx_dma_ent_msk_t	ent_mask;
456144961713Sgirish 	p_nxge_dma_common_t	dmap;
456244961713Sgirish 	uint32_t		ref_cnt;
456344961713Sgirish 	p_rx_msg_t		rx_msg_p;
456444961713Sgirish 	int			i;
456544961713Sgirish 	uint32_t		nxge_port_rcr_size;
456644961713Sgirish 
456744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
456844961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
456952ccf843Smisaki 	    "Recovering from RxDMAChannel#%d error...", channel));
457044961713Sgirish 
457144961713Sgirish 	/*
457244961713Sgirish 	 * Stop the DMA channel and wait for the stop-done bit.
457344961713Sgirish 	 * If the stop-done bit is not set, then flag
457444961713Sgirish 	 * an error.
457544961713Sgirish 	 */
457644961713Sgirish 
457744961713Sgirish 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
457844961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
457944961713Sgirish 
45803587e8e2SMichael Speer 	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel];
45813587e8e2SMichael Speer 	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel];
458244961713Sgirish 
458344961713Sgirish 	MUTEX_ENTER(&rbrp->lock);
458444961713Sgirish 	MUTEX_ENTER(&rbrp->post_lock);
458544961713Sgirish 
458644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
458744961713Sgirish 
458844961713Sgirish 	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
458944961713Sgirish 	if (rs != NPI_SUCCESS) {
459044961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
459152ccf843Smisaki 		    "nxge_disable_rxdma_channel:failed"));
459244961713Sgirish 		goto fail;
459344961713Sgirish 	}
459444961713Sgirish 
459544961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
459644961713Sgirish 
459744961713Sgirish 	/* Disable interrupt */
459844961713Sgirish 	ent_mask.value = RX_DMA_ENT_MSK_ALL;
459944961713Sgirish 	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
460044961713Sgirish 	if (rs != NPI_SUCCESS) {
460144961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
460252ccf843Smisaki 		    "nxge_rxdma_stop_channel: "
460352ccf843Smisaki 		    "set rxdma event masks failed (channel %d)",
460452ccf843Smisaki 		    channel));
460544961713Sgirish 	}
460644961713Sgirish 
460744961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
460844961713Sgirish 
460944961713Sgirish 	/* Reset RXDMA channel */
461044961713Sgirish 	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
461144961713Sgirish 	if (rs != NPI_SUCCESS) {
461244961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
461352ccf843Smisaki 		    "nxge_rxdma_fatal_err_recover: "
461452ccf843Smisaki 		    " reset rxdma failed (channel %d)", channel));
461544961713Sgirish 		goto fail;
461644961713Sgirish 	}
461744961713Sgirish 
461844961713Sgirish 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
461944961713Sgirish 
46203587e8e2SMichael Speer 	mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
462144961713Sgirish 
462244961713Sgirish 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
462344961713Sgirish 	rbrp->rbr_rd_index = 0;
462444961713Sgirish 
462544961713Sgirish 	rcrp->comp_rd_index = 0;
462644961713Sgirish 	rcrp->comp_wt_index = 0;
462744961713Sgirish 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
462852ccf843Smisaki 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
462952ccf843Smisaki 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
463052ccf843Smisaki 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
463144961713Sgirish 
463244961713Sgirish 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
463352ccf843Smisaki 	    (nxge_port_rcr_size - 1);
463444961713Sgirish 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
463552ccf843Smisaki 	    (nxge_port_rcr_size - 1);
463644961713Sgirish 
463744961713Sgirish 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
463844961713Sgirish 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
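	/*
	 * Reset the RCR software state (indices, head/tail pointers and a
	 * zeroed descriptor area) to the same initial values set up by
	 * nxge_map_rxdma_channel_cfg_ring before restarting the channel.
	 */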
463944961713Sgirish 
464044961713Sgirish 	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
464144961713Sgirish 
464244961713Sgirish 	for (i = 0; i < rbrp->rbr_max_size; i++) {
464344961713Sgirish 		rx_msg_p = rbrp->rx_msg_ring[i];
464444961713Sgirish 		ref_cnt = rx_msg_p->ref_cnt;
464544961713Sgirish 		if (ref_cnt != 1) {
4646a3c5bd6dSspeer 			if (rx_msg_p->cur_usage_cnt !=
464752ccf843Smisaki 			    rx_msg_p->max_usage_cnt) {
464844961713Sgirish 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
464952ccf843Smisaki 				    "buf[%d]: cur_usage_cnt = %d "
465052ccf843Smisaki 				    "max_usage_cnt = %d\n", i,
465152ccf843Smisaki 				    rx_msg_p->cur_usage_cnt,
465252ccf843Smisaki 				    rx_msg_p->max_usage_cnt));
4653a3c5bd6dSspeer 			} else {
4654a3c5bd6dSspeer 				/* Buffer can be re-posted */
4655a3c5bd6dSspeer 				rx_msg_p->free = B_TRUE;
4656a3c5bd6dSspeer 				rx_msg_p->cur_usage_cnt = 0;
4657a3c5bd6dSspeer 				rx_msg_p->max_usage_cnt = 0xbaddcafe;
4658a3c5bd6dSspeer 				rx_msg_p->pkt_buf_size = 0;
4659a3c5bd6dSspeer 			}
466044961713Sgirish 		}
466144961713Sgirish 	}
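	/*
	 * Buffers whose usage counts matched have been marked free and
	 * will be re-posted when the channel is restarted; buffers still
	 * held with an elevated ref_cnt are only reported above and are
	 * presumably returned later through nxge_freeb().
	 */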
466244961713Sgirish 
466344961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
466444961713Sgirish 
466544961713Sgirish 	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
466644961713Sgirish 	if (status != NXGE_OK) {
466744961713Sgirish 		goto fail;
466844961713Sgirish 	}
466944961713Sgirish 
467044961713Sgirish 	MUTEX_EXIT(&rbrp->post_lock);
467144961713Sgirish 	MUTEX_EXIT(&rbrp->lock);
467244961713Sgirish 
467344961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
467452ccf843Smisaki 	    "Recovery Successful, RxDMAChannel#%d Restored",
467552ccf843Smisaki 	    channel));
467644961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
467744961713Sgirish 	return (NXGE_OK);
4678ef523517SMichael Speer 
467944961713Sgirish fail:
468044961713Sgirish 	MUTEX_EXIT(&rbrp->post_lock);
468144961713Sgirish 	MUTEX_EXIT(&rbrp->lock);
468244961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
468344961713Sgirish 	return (NXGE_ERROR | rs);
468444961713Sgirish }
468544961713Sgirish 
468644961713Sgirish nxge_status_t
468744961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
468844961713Sgirish {
4689678453a8Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
4690678453a8Sspeer 	nxge_status_t status = NXGE_OK;
4691ef523517SMichael Speer 	p_rx_rcr_ring_t rcrp;
4692678453a8Sspeer 	int rdc;
469344961713Sgirish 
469444961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover"));
469544961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
469652ccf843Smisaki 	    "Recovering from RxPort error..."));
4697678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));
469844961713Sgirish 
469944961713Sgirish 	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
470044961713Sgirish 		goto fail;
470144961713Sgirish 
470244961713Sgirish 	NXGE_DELAY(1000);
470344961713Sgirish 
4704678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));
470544961713Sgirish 
4706678453a8Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
4707678453a8Sspeer 		if ((1 << rdc) & set->owned.map) {
4708ef523517SMichael Speer 			rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc];
4709ef523517SMichael Speer 			if (rcrp != NULL) {
4710ef523517SMichael Speer 				MUTEX_ENTER(&rcrp->lock);
4711ef523517SMichael Speer 				if (nxge_rxdma_fatal_err_recover(nxgep,
4712ef523517SMichael Speer 				    rdc) != NXGE_OK) {
4713ef523517SMichael Speer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4714ef523517SMichael Speer 					    "Could not recover "
4715ef523517SMichael Speer 					    "channel %d", rdc));
4716ef523517SMichael Speer 				}
4717ef523517SMichael Speer 				MUTEX_EXIT(&rcrp->lock);
4718678453a8Sspeer 			}
471944961713Sgirish 		}
472044961713Sgirish 	}
472144961713Sgirish 
4722678453a8Sspeer 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));
472344961713Sgirish 
472444961713Sgirish 	/* Reset IPP */
472544961713Sgirish 	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
472644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
472752ccf843Smisaki 		    "nxge_rx_port_fatal_err_recover: "
472852ccf843Smisaki 		    "Failed to reset IPP"));
472944961713Sgirish 		goto fail;
473044961713Sgirish 	}
473144961713Sgirish 
473244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
473344961713Sgirish 
473444961713Sgirish 	/* Reset RxMAC */
473544961713Sgirish 	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
473644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
473752ccf843Smisaki 		    "nxge_rx_port_fatal_err_recover: "
473852ccf843Smisaki 		    "Failed to reset RxMAC"));
473944961713Sgirish 		goto fail;
474044961713Sgirish 	}
474144961713Sgirish 
474244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
474344961713Sgirish 
474444961713Sgirish 	/* Re-Initialize IPP */
474544961713Sgirish 	if (nxge_ipp_init(nxgep) != NXGE_OK) {
474644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
474752ccf843Smisaki 		    "nxge_rx_port_fatal_err_recover: "
474852ccf843Smisaki 		    "Failed to init IPP"));
474944961713Sgirish 		goto fail;
475044961713Sgirish 	}
475144961713Sgirish 
475244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
475344961713Sgirish 
475444961713Sgirish 	/* Re-Initialize RxMAC */
475544961713Sgirish 	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
475644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
475752ccf843Smisaki 		    "nxge_rx_port_fatal_err_recover: "
475852ccf843Smisaki 		    "Failed to reset RxMAC"));
475944961713Sgirish 		goto fail;
476044961713Sgirish 	}
476144961713Sgirish 
476244961713Sgirish 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
476344961713Sgirish 
476444961713Sgirish 	/* Re-enable RxMAC */
476544961713Sgirish 	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
476644961713Sgirish 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
476752ccf843Smisaki 		    "nxge_rx_port_fatal_err_recover: "
476852ccf843Smisaki 		    "Failed to enable RxMAC"));
476944961713Sgirish 		goto fail;
477044961713Sgirish 	}
477144961713Sgirish 
477244961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
477352ccf843Smisaki 	    "Recovery Successful, RxPort Restored"));
477444961713Sgirish 
477544961713Sgirish 	return (NXGE_OK);
477644961713Sgirish fail:
477744961713Sgirish 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
477844961713Sgirish 	return (status);
477944961713Sgirish }
478044961713Sgirish 
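/*
 * nxge_rxdma_inject_err
 *
 *	Inject a simulated receive error for fault-injection testing.
 *	Per-channel RDMC errors are injected by setting the matching bit
 *	in the RX_DMA_CTL_STAT debug register for the given channel;
 *	ID_MISMATCH, ZCP_EOP_ERR and IPP_EOP_ERR are injected through the
 *	RX_CTL_DAT_FIFO_STAT debug register for this port.  DCF_ERR and
 *	RCR_ERR are accepted but no error is injected for them.
 */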
478144961713Sgirish void
478244961713Sgirish nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
478344961713Sgirish {
478444961713Sgirish 	rx_dma_ctl_stat_t	cs;
478544961713Sgirish 	rx_ctl_dat_fifo_stat_t	cdfs;
478644961713Sgirish 
478744961713Sgirish 	switch (err_id) {
478844961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
478944961713Sgirish 	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
479044961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
479144961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
479244961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
479344961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
479444961713Sgirish 	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
479544961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
479644961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCRINCON:
479744961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RCRFULL:
479844961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBRFULL:
479944961713Sgirish 	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
480044961713Sgirish 	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
480144961713Sgirish 	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
480244961713Sgirish 		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
480352ccf843Smisaki 		    chan, &cs.value);
480444961713Sgirish 		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
480544961713Sgirish 			cs.bits.hdw.rcr_ack_err = 1;
480644961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
480744961713Sgirish 			cs.bits.hdw.dc_fifo_err = 1;
480844961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
480944961713Sgirish 			cs.bits.hdw.rcr_sha_par = 1;
481044961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
481144961713Sgirish 			cs.bits.hdw.rbr_pre_par = 1;
481244961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
481344961713Sgirish 			cs.bits.hdw.rbr_tmout = 1;
481444961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
481544961713Sgirish 			cs.bits.hdw.rsp_cnt_err = 1;
481644961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
481744961713Sgirish 			cs.bits.hdw.byte_en_bus = 1;
481844961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
481944961713Sgirish 			cs.bits.hdw.rsp_dat_err = 1;
482044961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
482144961713Sgirish 			cs.bits.hdw.config_err = 1;
482244961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
482344961713Sgirish 			cs.bits.hdw.rcrincon = 1;
482444961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
482544961713Sgirish 			cs.bits.hdw.rcrfull = 1;
482644961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
482744961713Sgirish 			cs.bits.hdw.rbrfull = 1;
482844961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
482944961713Sgirish 			cs.bits.hdw.rbrlogpage = 1;
483044961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
483144961713Sgirish 			cs.bits.hdw.cfiglogpage = 1;
483244961713Sgirish 		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
483352ccf843Smisaki 		    cs.value);
483444961713Sgirish 		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
483552ccf843Smisaki 		    chan, cs.value);
483644961713Sgirish 		break;
483744961713Sgirish 	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
483844961713Sgirish 	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
483944961713Sgirish 	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
484044961713Sgirish 		cdfs.value = 0;
484144961713Sgirish 		if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
484244961713Sgirish 			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
484344961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
484444961713Sgirish 			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
484544961713Sgirish 		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
484644961713Sgirish 			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
484744961713Sgirish 		cmn_err(CE_NOTE,
484852ccf843Smisaki 		    "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
484952ccf843Smisaki 		    cdfs.value);
4850678453a8Sspeer 		NXGE_REG_WR64(nxgep->npi_handle,
4851678453a8Sspeer 		    RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
485244961713Sgirish 		break;
485344961713Sgirish 	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
485444961713Sgirish 		break;
485553f3d8ecSyc 	case NXGE_FM_EREPORT_RDMC_RCR_ERR:
485644961713Sgirish 		break;
485744961713Sgirish 	}
485844961713Sgirish }
4859678453a8Sspeer 
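/*
 * nxge_rxdma_databuf_free
 *
 *	Free the receive data buffer chunks attached to an RBR ring.
 *	Buffers allocated through the DDI (DDI_MEM_ALLOC) are not freed
 *	here; otherwise each chunk recorded in the ring's buffer table
 *	is released via nxge_free_buf() and its kernel address cleared.
 */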
4860678453a8Sspeer static void
4861678453a8Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
4862678453a8Sspeer {
4863*86ef0a63SRichard Lowe 	rxring_info_t		*ring_info;
4864678453a8Sspeer 	int			index;
4865678453a8Sspeer 	uint32_t		chunk_size;
4866678453a8Sspeer 	uint64_t		kaddr;
4867678453a8Sspeer 	uint_t			num_blocks;
4868678453a8Sspeer 
4869678453a8Sspeer 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));
4870678453a8Sspeer 
4871678453a8Sspeer 	if (rbr_p == NULL) {
4872678453a8Sspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4873678453a8Sspeer 		    "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
4874678453a8Sspeer 		return;
4875678453a8Sspeer 	}
4876678453a8Sspeer 
4877678453a8Sspeer 	if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
4878e759c33aSMichael Speer 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4879e759c33aSMichael Speer 		    "<== nxge_rxdma_databuf_free: DDI"));
4880678453a8Sspeer 		return;
4881678453a8Sspeer 	}
4882678453a8Sspeer 
4883678453a8Sspeer 	ring_info = rbr_p->ring_info;
4884678453a8Sspeer 	if (ring_info == NULL) {
4885678453a8Sspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4886678453a8Sspeer 		    "==> nxge_rxdma_databuf_free: NULL ring info"));
4887678453a8Sspeer 		return;
4888678453a8Sspeer 	}
4889678453a8Sspeer 	num_blocks = rbr_p->num_blocks;
4890678453a8Sspeer 	for (index = 0; index < num_blocks; index++) {
4891678453a8Sspeer 		kaddr = ring_info->buffer[index].kaddr;
4892678453a8Sspeer 		chunk_size = ring_info->buffer[index].buf_size;
4893678453a8Sspeer 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4894678453a8Sspeer 		    "==> nxge_rxdma_databuf_free: free chunk %d "
4895678453a8Sspeer 		    "kaddrp $%p chunk size %d",
4896678453a8Sspeer 		    index, kaddr, chunk_size));
4897b37cc459SToomas Soome 		if (kaddr == 0)
4898b37cc459SToomas Soome 			continue;
4899678453a8Sspeer 		nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
4900b37cc459SToomas Soome 		ring_info->buffer[index].kaddr = 0;
4901678453a8Sspeer 	}
4902678453a8Sspeer 
4903678453a8Sspeer 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
4904678453a8Sspeer }
4905678453a8Sspeer 
4906678453a8Sspeer #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
4907678453a8Sspeer extern void contig_mem_free(void *, size_t);
4908678453a8Sspeer #endif
4909678453a8Sspeer 
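/*
 * nxge_free_buf
 *
 *	Free a single receive buffer chunk according to the way it was
 *	allocated: KMEM_ALLOC chunks are returned to kmem, and, on sun4v
 *	with the NIU workaround, CONTIG_MEM_ALLOC chunks are returned
 *	with contig_mem_free().  Other allocation types are rejected.
 */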
4910678453a8Sspeer void
4911678453a8Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
4912678453a8Sspeer {
4913678453a8Sspeer 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));
4914678453a8Sspeer 
4915b37cc459SToomas Soome 	if (kaddr == 0 || !buf_size) {
4916678453a8Sspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4917678453a8Sspeer 		    "==> nxge_free_buf: invalid kaddr $%p size to free %d",
4918678453a8Sspeer 		    kaddr, buf_size));
4919678453a8Sspeer 		return;
4920678453a8Sspeer 	}
4921678453a8Sspeer 
4922678453a8Sspeer 	switch (alloc_type) {
4923678453a8Sspeer 	case KMEM_ALLOC:
4924678453a8Sspeer 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4925678453a8Sspeer 		    "==> nxge_free_buf: freeing kmem $%p size %d",
4926678453a8Sspeer 		    kaddr, buf_size));
4927678453a8Sspeer 		KMEM_FREE((void *)kaddr, buf_size);
4928678453a8Sspeer 		break;
4929678453a8Sspeer 
4930678453a8Sspeer #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
4931678453a8Sspeer 	case CONTIG_MEM_ALLOC:
4932678453a8Sspeer 		NXGE_DEBUG_MSG((NULL, DMA_CTL,
4933678453a8Sspeer 		    "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
4934678453a8Sspeer 		    kaddr, buf_size));
4935678453a8Sspeer 		contig_mem_free((void *)kaddr, buf_size);
4936678453a8Sspeer 		break;
4937678453a8Sspeer #endif
4938678453a8Sspeer 
4939678453a8Sspeer 	default:
4940678453a8Sspeer 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4941678453a8Sspeer 		    "<== nxge_free_buf: unsupported alloc type %d",
4942678453a8Sspeer 		    alloc_type));
4943678453a8Sspeer 		return;
4944678453a8Sspeer 	}
4945678453a8Sspeer 
4946678453a8Sspeer 	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
4947678453a8Sspeer }
4948