1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #ifndef	_SYS_NXGE_NXGE_RXDMA_H
27 #define	_SYS_NXGE_NXGE_RXDMA_H
28 
29 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30 
31 #ifdef	__cplusplus
32 extern "C" {
33 #endif
34 
35 #include <sys/nxge/nxge_rxdma_hw.h>
36 #include <npi_rxdma.h>
37 
/* RXDMA clock divider: 7500 ticks == 25 usec (per comment below) */
#define	RXDMA_CK_DIV_DEFAULT		7500 	/* 25 usec */
/*
 * Hardware RDC designer: 8 cache lines during Atlas bringup.
 * (RED == Random Early Discard thresholds for the receive DMA channel.)
 */
#define	RXDMA_RED_LESS_BYTES		(8 * 64) /* 8 cache line */
#define	RXDMA_RED_LESS_ENTRIES		(RXDMA_RED_LESS_BYTES/8)
#define	RXDMA_RED_WINDOW_DEFAULT	0
#define	RXDMA_RED_THRES_DEFAULT		0

/* RCR (receive completion ring) interrupt packet threshold and timeout */
#define	RXDMA_RCR_PTHRES_DEFAULT	0x20
#define	RXDMA_RCR_TO_DEFAULT		0x8

/*
 * hardware workarounds: kick 16 (was 8 before)
 * (number of buffers posted to the RBR per kick)
 */
#define	NXGE_RXDMA_POST_BATCH		16
54 
/*
 * Start address of buffer <index> within the buffer area that begins at
 * <a>, given a per-buffer size of <bsize>.
 *
 * NOTE(review): the original definition read
 *	((a & (index * bsize))
 * which has unbalanced parentheses -- any expansion was a compile error,
 * so no caller could have used it -- and used '&' where an address
 * computation calls for '+'.  Fixed, and every macro argument is now
 * parenthesized per standard macro hygiene.
 */
#define	RXBUF_START_ADDR(a, index, bsize)	((a) + ((index) * (bsize)))
/* Byte offset of address <start> from the buffer area base <a>. */
#define	RXBUF_OFFSET_FROM_START(a, start)	((start) - (a))
/* Receive buffers are aligned on 64-byte boundaries. */
#define	RXBUF_64B_ALIGNED		64
58 
/* Extra bytes reserved per receive buffer -- TODO(review): confirm use */
#define	NXGE_RXBUF_EXTRA		34
/*
 * Receive buffer thresholds and buffer types.
 * The bcopy threshold is expressed in eighths of the posted buffers:
 * once more than <n>/8 of the buffers are outstanding, packets are
 * bcopy'd instead of loaned up (per the enum comments below).
 */
#define	NXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */
typedef enum  {
	NXGE_RX_COPY_ALL = 0,		/* do bcopy on every packet	 */
	NXGE_RX_COPY_1,			/* bcopy on 1/8 of buffer posted */
	NXGE_RX_COPY_2,			/* bcopy on 2/8 of buffer posted */
	NXGE_RX_COPY_3,			/* bcopy on 3/8 of buffer posted */
	NXGE_RX_COPY_4,			/* bcopy on 4/8 of buffer posted */
	NXGE_RX_COPY_5,			/* bcopy on 5/8 of buffer posted */
	NXGE_RX_COPY_6,			/* bcopy on 6/8 of buffer posted */
	NXGE_RX_COPY_7,			/* bcopy on 7/8 of buffer posted */
	NXGE_RX_COPY_NONE		/* don't do bcopy at all	 */
} nxge_rxbuf_threshold_t;

/*
 * RBR buffer-size classes; values come from the hardware RCR
 * packet-buffer-size codes (RCR_PKTBUFSZ_*) in nxge_rxdma_hw.h.
 */
typedef enum  {
	NXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0,  /* bcopy buffer size 0 (small) */
	NXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1,  /* bcopy buffer size 1 (medium) */
	NXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	  /* bcopy buffer size 2 (large) */
} nxge_rxbuf_type_t;
81 
/*
 * Snapshot of the RDC hardware parity-error log registers plus the
 * last completion error type seen on the channel.
 */
typedef	struct _rdc_errlog {
	rdmc_par_err_log_t	pre_par;	/* prefetch parity error log */
	rdmc_par_err_log_t	sha_par;	/* shadow parity error log */
	uint8_t			compl_err_type;	/* last RCR completion error */
} rdc_errlog_t;
87 
88 /*
89  * Receive  Statistics.
90  */
91 typedef struct _nxge_rx_ring_stats_t {
92 	uint64_t	ipackets;
93 	uint64_t	ibytes;
94 	uint32_t	ierrors;
95 	uint32_t	multircv;
96 	uint32_t	brdcstrcv;
97 	uint32_t	norcvbuf;
98 
99 	uint32_t	rx_inits;
100 	uint32_t	rx_jumbo_pkts;
101 	uint32_t	rx_multi_pkts;
102 	uint32_t	rx_mtu_pkts;
103 	uint32_t	rx_no_buf;
104 
105 	/*
106 	 * Receive buffer management statistics.
107 	 */
108 	uint32_t	rx_new_pages;
109 	uint32_t	rx_new_mtu_pgs;
110 	uint32_t	rx_new_nxt_pgs;
111 	uint32_t	rx_reused_pgs;
112 	uint32_t	rx_mtu_drops;
113 	uint32_t	rx_nxt_drops;
114 
115 	/*
116 	 * Error event stats.
117 	 */
118 	uint32_t	rx_rbr_tmout;
119 	uint32_t	pkt_too_long_err;
120 	uint32_t	l2_err;
121 	uint32_t	l4_cksum_err;
122 	uint32_t	fflp_soft_err;
123 	uint32_t	zcp_soft_err;
124 	uint32_t	rcr_unknown_err;
125 	uint32_t	dcf_err;
126 	uint32_t 	rbr_tmout;
127 	uint32_t 	rsp_cnt_err;
128 	uint32_t 	byte_en_err;
129 	uint32_t 	byte_en_bus;
130 	uint32_t 	rsp_dat_err;
131 	uint32_t 	rcr_ack_err;
132 	uint32_t 	dc_fifo_err;
133 	uint32_t 	rcr_sha_par;
134 	uint32_t 	rbr_pre_par;
135 	uint32_t 	port_drop_pkt;
136 	uint32_t 	wred_drop;
137 	uint32_t 	rbr_pre_empty;
138 	uint32_t 	rcr_shadow_full;
139 	uint32_t 	config_err;
140 	uint32_t 	rcrincon;
141 	uint32_t 	rcrfull;
142 	uint32_t 	rbr_empty;
143 	uint32_t 	rbrfull;
144 	uint32_t 	rbrlogpage;
145 	uint32_t 	cfiglogpage;
146 	uint32_t 	rcrto;
147 	uint32_t 	rcrthres;
148 	uint32_t 	mex;
149 	rdc_errlog_t	errlog;
150 } nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;
151 
/*
 * System-level (chip-wide, not per-channel) RDC error counters.
 */
typedef struct _nxge_rdc_sys_stats {
	uint32_t	pre_par;	/* prefetch parity errors */
	uint32_t	sha_par;	/* shadow parity errors */
	uint32_t	id_mismatch;
	uint32_t	ipp_eop_err;	/* IPP end-of-packet errors */
	uint32_t	zcp_eop_err;	/* ZCP end-of-packet errors */
} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;

/*
 * Software reserved buffer offset
 */
typedef struct _nxge_rxbuf_off_hdr_t {
	uint32_t		index;
} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;
166 
167 /*
168  * Definitions for each receive buffer block.
169  */
170 typedef struct _nxge_rbb_t {
171 	nxge_os_dma_common_t	dma_buf_info;
172 	uint8_t			rbr_page_num;
173 	uint32_t		block_size;
174 	uint16_t		dma_channel;
175 	uint32_t		bytes_received;
176 	uint32_t		ref_cnt;
177 	uint_t			pkt_buf_size;
178 	uint_t			max_pkt_bufs;
179 	uint32_t		cur_usage_cnt;
180 } nxge_rbb_t, *p_nxge_rbb_t;
181 
182 
183 typedef struct _rx_tx_param_t {
184 	nxge_logical_page_t logical_pages[NXGE_MAX_LOGICAL_PAGES];
185 } rx_tx_param_t, *p_rx_tx_param_t;
186 
187 typedef struct _rx_tx_params {
188 	struct _tx_param_t 	*tx_param_p;
189 } rx_tx_params_t, *p_rx_tx_params_t;
190 
191 
/*
 * Per receive-buffer-block message state.  One of these tracks each
 * buffer block posted to the RBR: its DMA mapping, how many packet
 * buffers it has been carved into, and how many references (loaned-up
 * mblks) are still outstanding.
 */
typedef struct _rx_msg_t {
	nxge_os_dma_common_t	buf_dma;	/* DMA mapping of the block */
	nxge_os_mutex_t 	lock;
	struct _nxge_t		*nxgep;		/* back pointer to instance */
	struct _rx_rbr_ring_t	*rx_rbr_p;	/* owning RBR */
	boolean_t 		spare_in_use;
	boolean_t 		free;
	uint32_t 		ref_cnt;	/* outstanding references */
#ifdef RXBUFF_USE_SEPARATE_UP_CNTR
	uint32_t 		pass_up_cnt;
	boolean_t 		release;
#endif
	nxge_os_frtn_t 		freeb;		/* free routine (desballoc) */
	size_t 			bytes_arrived;
	size_t 			bytes_expected;
	size_t 			block_size;	/* block size in bytes */
	uint32_t		block_index;	/* index within buffer area */
	uint32_t 		pkt_buf_size;
	uint32_t 		pkt_buf_size_code; /* hw RCR_PKTBUFSZ_* code */
	uint32_t 		max_pkt_bufs;	/* packet buffers per block */
	uint32_t		cur_usage_cnt;
	uint32_t		max_usage_cnt;
	uchar_t			*buffer;	/* kernel VA of the block */
	uint32_t 		pri;
	uint32_t 		shifted_addr;
	boolean_t		use_buf_pool;
	p_mblk_t 		rx_mblk_p;
	boolean_t		rx_use_bcopy;	/* bcopy instead of loan-up */
} rx_msg_t, *p_rx_msg_t;

/* Bundled handles needed to touch one DMA-mapped region. */
typedef struct _rx_dma_handle_t {
	nxge_os_dma_handle_t	dma_handle;	/* DMA handle	*/
	nxge_os_acc_handle_t	acc_handle;	/* DMA memory handle */
	npi_handle_t		npi_handle;	/* NPI register access handle */
} rx_dma_handle_t, *p_rx_dma_handle_t;
227 
/* Depth of the receive-completion history buffer (debug aid). */
#define	RXCOMP_HIST_ELEMENTS 100000

typedef struct _nxge_rxcomphist_t {
	uint_t 			comp_cnt;	/* completions recorded */
	uint64_t 		rx_comp_entry;	/* raw RCR entry value */
} nxge_rxcomphist_t, *p_nxge_rxcomphist_t;
234 
/*
 * Receive Completion Ring (RCR): the hardware writes one completion
 * entry per received packet buffer; software consumes them under <lock>.
 */
typedef struct _rx_rcr_ring_t {
	nxge_os_dma_common_t	rcr_desc;	/* DMA memory for the ring */
	uint8_t			rcr_page_num;
	uint8_t			rcr_buf_page_num;

	struct _nxge_t		*nxgep;		/* back pointer to instance */

	p_nxge_rx_ring_stats_t	rdc_stats;	/* channel statistics */

	rcrcfig_a_t		rcr_cfga;	/* hw config register images */
	rcrcfig_b_t		rcr_cfgb;
	boolean_t		cfg_set;

	nxge_os_mutex_t 	lock;
	uint16_t		index;
	uint16_t		rdc;		/* RX DMA channel number */
	uint16_t		rdc_grp_id;
	uint16_t		ldg_group_id;	/* logical device group */
	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
	uint32_t 		comp_size;	 /* # of RCR entries */
	uint64_t		rcr_addr;
	uint_t 			comp_wrap_mask;
	uint_t 			comp_rd_index;	/* software read index */
	uint_t 			comp_wt_index;	/* hardware write index */

	p_rcr_entry_t		rcr_desc_first_p;
	p_rcr_entry_t		rcr_desc_first_pp;
	p_rcr_entry_t		rcr_desc_last_p;
	p_rcr_entry_t		rcr_desc_last_pp;

	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
	p_rcr_entry_t		rcr_desc_rd_head_pp;

	p_rcr_entry_t		rcr_desc_wt_tail_p;	/* hardware write */
	p_rcr_entry_t		rcr_desc_wt_tail_pp;

	uint64_t		rcr_tail_pp;
	uint64_t		rcr_head_pp;
	struct _rx_rbr_ring_t	*rx_rbr_p;	/* companion buffer ring */
	uint32_t		intr_timeout;	/* cf RXDMA_RCR_TO_DEFAULT */
	uint32_t		intr_threshold;	/* cf RXDMA_RCR_PTHRES_DEFAULT */
	uint64_t		max_receive_pkts;
	p_mblk_t		rx_first_mp;
	mac_resource_handle_t	rcr_mac_handle;	/* GLDv3 resource handle */
	uint32_t		rcvd_pkt_bytes; /* Received bytes of a packet */
} rx_rcr_ring_t, *p_rx_rcr_ring_t;
283 
284 
285 
/* Buffer index information */
typedef struct _rxbuf_index_info_t {
	uint32_t buf_index;
	uint32_t start_index;
	uint32_t buf_size;
	uint64_t dvma_addr;	/* device (DMA) address of the buffer */
	uint64_t kaddr;		/* kernel virtual address of the buffer */
} rxbuf_index_info_t, *p_rxbuf_index_info_t;

/*
 * Ring-wide lookup information used to map a DMA address back to its
 * buffer block (one rxbuf_index_info_t per DMA block).
 */
typedef struct _rxring_info_t {
	uint32_t hint[3];	/* search hints, one per buffer size class */
	uint32_t block_size_mask;
	uint16_t max_iterations;	/* search bound over buffer[] */
	rxbuf_index_info_t buffer[NXGE_DMA_BLOCK];
} rxring_info_t, *p_rxring_info_t;


typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;
310 
311 
/*
 * Receive Buffer Block Ring (RBR): software posts buffer blocks here
 * for the hardware to fill.  Posting is serialized by <post_lock>;
 * general ring state by <lock>.
 */
typedef struct _rx_rbr_ring_t {
	nxge_os_dma_common_t	rbr_desc;	/* DMA memory for the ring */
	p_rx_msg_t 		*rx_msg_ring;	/* per-block message array */
	p_nxge_dma_common_t 	*dma_bufp;	/* buffer DMA areas */
	rbr_cfig_a_t		rbr_cfga;	/* hw config register images */
	rbr_cfig_b_t		rbr_cfgb;
	rbr_kick_t		rbr_kick;
	log_page_vld_t		page_valid;	/* logical page registers */
	log_page_mask_t		page_mask_1;
	log_page_mask_t		page_mask_2;
	log_page_value_t	page_value_1;
	log_page_value_t	page_value_2;
	log_page_relo_t		page_reloc_1;
	log_page_relo_t		page_reloc_2;
	log_page_hdl_t		page_hdl;

	boolean_t		cfg_set;

	nxge_os_mutex_t		lock;
	nxge_os_mutex_t		post_lock;	/* serializes buffer posting */
	uint16_t		index;
	struct _nxge_t		*nxgep;		/* back pointer to instance */
	uint16_t		rdc;		/* RX DMA channel number */
	uint16_t		rdc_grp_id;
	uint_t 			rbr_max_size;
	uint64_t		rbr_addr;
	uint_t 			rbr_wrap_mask;
	uint_t 			rbb_max;	/* max buffer blocks */
	uint_t 			rbb_added;
	uint_t			block_size;	/* buffer block size (bytes) */
	uint_t			num_blocks;
	uint_t			tnblocks;
	/* The three hw buffer size classes and their NPI encodings. */
	uint_t			pkt_buf_size0;
	uint_t			pkt_buf_size0_bytes;
	uint_t			npi_pkt_buf_size0;
	uint_t			pkt_buf_size1;
	uint_t			pkt_buf_size1_bytes;
	uint_t			npi_pkt_buf_size1;
	uint_t			pkt_buf_size2;
	uint_t			pkt_buf_size2_bytes;
	uint_t			npi_pkt_buf_size2;

	uint64_t		rbr_head_pp;
	uint64_t		rbr_tail_pp;
	uint32_t		*rbr_desc_vp;	/* kernel VA of descriptors */

	p_rx_rcr_ring_t		rx_rcr_p;	/* companion completion ring */

	rx_dma_ent_msk_t	rx_dma_ent_mask; /* event mask register image */

	rbr_hdh_t		rbr_head;
	rbr_hdl_t		rbr_tail;
	uint_t 			rbr_wr_index;	/* software write index */
	uint_t 			rbr_rd_index;	/* software read index */
	uint_t 			rbr_hw_head_index;
	uint64_t 		rbr_hw_head_ptr;

	/* may not be needed */
	p_nxge_rbb_t		rbb_p;

	rxring_info_t  *ring_info;	/* DMA-address -> block lookup */
#ifdef RX_USE_RECLAIM_POST
	uint32_t hw_freed;
	uint32_t sw_freed;
	uint32_t msg_rd_index;
	uint32_t msg_cnt;
#endif
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/* Hypervisor logical-domain I/O addresses (sun4v NIU only). */
	uint64_t		hv_rx_buf_base_ioaddr_pp;
	uint64_t		hv_rx_buf_ioaddr_size;
	uint64_t		hv_rx_cntl_base_ioaddr_pp;
	uint64_t		hv_rx_cntl_ioaddr_size;
	boolean_t		hv_set;
#endif
	uint_t 			rbr_consumed;
	uint_t 			rbr_threshold_hi;	/* bcopy thresholds */
	uint_t 			rbr_threshold_lo;
	nxge_rxbuf_type_t	rbr_bufsize_type;
	boolean_t		rbr_use_bcopy;

	/*
	 * <rbr_ref_cnt> is a count of those receive buffers which
	 * have been loaned to the kernel.  We will not free this
	 * ring until the reference count reaches zero (0).
	 */
	uint32_t		rbr_ref_cnt;
	rbr_state_t		rbr_state; /* POSTING, etc */

} rx_rbr_ring_t, *p_rx_rbr_ring_t;
402 
/*
 * Receive Mailbox: DMA area the hardware uses to report channel state
 * (configured through the RXDMA_CFIG1/2 register images kept here).
 */
typedef struct _rx_mbox_t {
	nxge_os_dma_common_t	rx_mbox;	/* mailbox DMA memory */
	rxdma_cfig1_t		rx_cfg1;	/* config register images */
	rxdma_cfig2_t		rx_cfg2;
	uint64_t		mbox_addr;	/* mailbox DMA address */
	boolean_t		cfg_set;

	nxge_os_mutex_t 	lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;		/* back pointer to instance */
	uint16_t		rdc;		/* RX DMA channel number */
} rx_mbox_t, *p_rx_mbox_t;


/* Per-instance collection of all RBRs (one per DMA channel). */
typedef struct _rx_rbr_rings_t {
	p_rx_rbr_ring_t 	*rbr_rings;
	uint32_t			ndmas;	/* number of DMA channels */
	boolean_t		rxbuf_allocated;
} rx_rbr_rings_t, *p_rx_rbr_rings_t;

/* Per-instance collection of all RCRs (one per DMA channel). */
typedef struct _rx_rcr_rings_t {
	p_rx_rcr_ring_t 	*rcr_rings;
	uint32_t			ndmas;	/* number of DMA channels */
	boolean_t		cntl_buf_allocated;
} rx_rcr_rings_t, *p_rx_rcr_rings_t;

/* Per-instance collection of all receive mailboxes. */
typedef struct _rx_mbox_areas_t {
	p_rx_mbox_t 		*rxmbox_areas;
	uint32_t			ndmas;	/* number of DMA channels */
	boolean_t		mbox_allocated;
} rx_mbox_areas_t, *p_rx_mbox_areas_t;
435 
436 /*
437  * Global register definitions per chip and they are initialized
438  * using the function zero control registers.
439  * .
440  */
441 
442 typedef struct _rxdma_globals {
443 	boolean_t		mode32;
444 	uint16_t		rxdma_ck_div_cnt;
445 	uint16_t		rxdma_red_ran_init;
446 	uint32_t		rxdma_eing_timeout;
447 } rxdma_globals_t, *p_rxdma_globals;
448 
449 
450 /*
451  * Receive DMA Prototypes.
452  */
453 nxge_status_t nxge_init_rxdma_channel_rcrflush(p_nxge_t, uint8_t);
454 nxge_status_t nxge_init_rxdma_channels(p_nxge_t);
455 void nxge_uninit_rxdma_channels(p_nxge_t);
456 nxge_status_t nxge_reset_rxdma_channel(p_nxge_t, uint16_t);
457 nxge_status_t nxge_init_rxdma_channel_cntl_stat(p_nxge_t,
458 	uint16_t, p_rx_dma_ctl_stat_t);
459 nxge_status_t nxge_enable_rxdma_channel(p_nxge_t,
460 	uint16_t, p_rx_rbr_ring_t, p_rx_rcr_ring_t,
461 	p_rx_mbox_t);
462 nxge_status_t nxge_init_rxdma_channel_event_mask(p_nxge_t,
463 		uint16_t, p_rx_dma_ent_msk_t);
464 
465 nxge_status_t nxge_rxdma_hw_mode(p_nxge_t, boolean_t);
466 void nxge_hw_start_rx(p_nxge_t);
467 void nxge_fixup_rxdma_rings(p_nxge_t);
468 nxge_status_t nxge_dump_rxdma_channel(p_nxge_t, uint8_t);
469 
470 void nxge_rxdma_fix_channel(p_nxge_t, uint16_t);
471 void nxge_rxdma_fixup_channel(p_nxge_t, uint16_t, int);
472 int nxge_rxdma_get_ring_index(p_nxge_t, uint16_t);
473 
474 void nxge_rxdma_regs_dump_channels(p_nxge_t);
475 nxge_status_t nxge_rxdma_handle_sys_errors(p_nxge_t);
476 void nxge_rxdma_inject_err(p_nxge_t, uint32_t, uint8_t);
477 
478 
479 #ifdef	__cplusplus
480 }
481 #endif
482 
483 #endif	/* _SYS_NXGE_NXGE_RXDMA_H */
484