1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#ifndef	_SYS_NXGE_NXGE_RXDMA_H
28#define	_SYS_NXGE_NXGE_RXDMA_H
29
30#ifdef	__cplusplus
31extern "C" {
32#endif
33
34#include <sys/nxge/nxge_rxdma_hw.h>
35#include <npi_rxdma.h>
36
/* Default RXDMA tuning values programmed at channel init. */
#define	RXDMA_CK_DIV_DEFAULT		7500 	/* 25 usec */
/*
 * Hardware RDC designer: 8 cache lines during Atlas bringup.
 */
#define	RXDMA_RED_LESS_BYTES		(8 * 64) /* 8 cache line */
#define	RXDMA_RED_LESS_ENTRIES		(RXDMA_RED_LESS_BYTES/8)	/* 8-byte RCR entries */
#define	RXDMA_RED_WINDOW_DEFAULT	0
#define	RXDMA_RED_THRES_DEFAULT		0

#define	RXDMA_RCR_PTHRES_DEFAULT	0x20	/* default RCR packet threshold */
#define	RXDMA_RCR_TO_DEFAULT		0x8	/* default RCR timeout */

/*
 * hardware workarounds: kick 16 (was 8 before)
 */
#define	NXGE_RXDMA_POST_BATCH		16
53
/*
 * Address of buffer block <index> within buffer area <a>, where each
 * block is <bsize> bytes.
 *
 * NOTE(review): the original expansion "((a & (index * bsize))" had
 * unbalanced parentheses (could never compile if used) and masked with
 * '&'.  '+' is consistent with RXBUF_OFFSET_FROM_START() below, which
 * computes the inverse (start - a); confirm against callers.
 */
#define	RXBUF_START_ADDR(a, index, bsize)	((a) + ((index) * (bsize)))
/* Byte offset of buffer address <start> from the start of area <a>. */
#define	RXBUF_OFFSET_FROM_START(a, start)	((start) - (a))
#define	RXBUF_64B_ALIGNED		64	/* rx buffers are 64-byte aligned */
57
#define	NXGE_RXBUF_EXTRA		34	/* extra bytes per rx buffer -- TODO confirm use */
/*
 * Receive buffer thresholds and buffer types
 */
#define	NXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */
/*
 * Per-channel bcopy threshold, in units of 1/NXGE_RX_BCOPY_SCALE of the
 * posted buffers.  NOTE(review): the exact switch-over logic (bcopy vs.
 * loaning buffers upstream) lives in the implementation file; these are
 * only the selectable levels.
 */
typedef enum  {
	NXGE_RX_COPY_ALL = 0,		/* do bcopy on every packet	 */
	NXGE_RX_COPY_1,			/* bcopy on 1/8 of buffer posted */
	NXGE_RX_COPY_2,			/* bcopy on 2/8 of buffer posted */
	NXGE_RX_COPY_3,			/* bcopy on 3/8 of buffer posted */
	NXGE_RX_COPY_4,			/* bcopy on 4/8 of buffer posted */
	NXGE_RX_COPY_5,			/* bcopy on 5/8 of buffer posted */
	NXGE_RX_COPY_6,			/* bcopy on 6/8 of buffer posted */
	NXGE_RX_COPY_7,			/* bcopy on 7/8 of buffer posted */
	NXGE_RX_COPY_NONE		/* don't do bcopy at all	 */
} nxge_rxbuf_threshold_t;
74
/*
 * Receive buffer size classes; values are the hardware RCR packet
 * buffer size codes from nxge_rxdma_hw.h.
 */
typedef enum  {
	NXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0,  /* bcopy buffer size 0 (small) */
	NXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1,  /* bcopy buffer size 1 (medium) */
	NXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	  /* bcopy buffer size 2 (large) */
} nxge_rxbuf_type_t;
80
/*
 * Saved RDC error-log state: parity error logs and the most recent
 * completion error type, captured for kstat/error reporting.
 */
typedef	struct _rdc_errlog {
	rdmc_par_err_log_t	pre_par;	/* prefetch parity error log */
	rdmc_par_err_log_t	sha_par;	/* shadow parity error log */
	uint8_t			compl_err_type;	/* last completion error type seen */
} rdc_errlog_t;
86
87/*
88 * Receive  Statistics.
89 */
90typedef struct _nxge_rx_ring_stats_t {
91	uint64_t	ipackets;
92	uint64_t	ibytes;
93	uint32_t	ierrors;
94	uint32_t	multircv;
95	uint32_t	brdcstrcv;
96	uint32_t	norcvbuf;
97
98	uint32_t	rx_inits;
99	uint32_t	rx_jumbo_pkts;
100	uint32_t	rx_multi_pkts;
101	uint32_t	rx_mtu_pkts;
102	uint32_t	rx_no_buf;
103
104	/*
105	 * Receive buffer management statistics.
106	 */
107	uint32_t	rx_new_pages;
108	uint32_t	rx_new_mtu_pgs;
109	uint32_t	rx_new_nxt_pgs;
110	uint32_t	rx_reused_pgs;
111	uint32_t	rx_mtu_drops;
112	uint32_t	rx_nxt_drops;
113
114	/*
115	 * Error event stats.
116	 */
117	uint32_t	rx_rbr_tmout;
118	uint32_t	pkt_too_long_err;
119	uint32_t	l2_err;
120	uint32_t	l4_cksum_err;
121	uint32_t	fflp_soft_err;
122	uint32_t	zcp_soft_err;
123	uint32_t	rcr_unknown_err;
124	uint32_t	dcf_err;
125	uint32_t 	rbr_tmout;
126	uint32_t 	rsp_cnt_err;
127	uint32_t 	byte_en_err;
128	uint32_t 	byte_en_bus;
129	uint32_t 	rsp_dat_err;
130	uint32_t 	rcr_ack_err;
131	uint32_t 	dc_fifo_err;
132	uint32_t 	rcr_sha_par;
133	uint32_t 	rbr_pre_par;
134	uint32_t 	port_drop_pkt;
135	uint32_t 	wred_drop;
136	uint32_t 	rbr_pre_empty;
137	uint32_t 	rcr_shadow_full;
138	uint32_t 	config_err;
139	uint32_t 	rcrincon;
140	uint32_t 	rcrfull;
141	uint32_t 	rbr_empty;
142	uint32_t 	rbrfull;
143	uint32_t 	rbrlogpage;
144	uint32_t 	cfiglogpage;
145	uint32_t 	rcrto;
146	uint32_t 	rcrthres;
147	uint32_t 	mex;
148	rdc_errlog_t	errlog;
149} nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;
150
/*
 * System-level (non-per-channel) RDC error statistics.
 */
typedef struct _nxge_rdc_sys_stats {
	uint32_t	pre_par;	/* prefetch parity errors */
	uint32_t	sha_par;	/* shadow parity errors */
	uint32_t	id_mismatch;
	uint32_t	ipp_eop_err;	/* IPP end-of-packet errors */
	uint32_t	zcp_eop_err;	/* ZCP end-of-packet errors */
} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;
158
159/*
160 * Software reserved buffer offset
161 */
162typedef struct _nxge_rxbuf_off_hdr_t {
163	uint32_t		index;
164} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;
165
166
/*
 * Per-buffer-block software state.  One rx_msg_t tracks one DMA buffer
 * block posted to the RBR; the block is carved into packet buffers of
 * pkt_buf_size bytes.
 */
typedef struct _rx_msg_t {
	nxge_os_dma_common_t	buf_dma;	/* DMA state for this block */
	nxge_os_mutex_t 	lock;
	struct _nxge_t		*nxgep;		/* back-pointer to instance */
	struct _rx_rbr_ring_t	*rx_rbr_p;	/* owning buffer ring */
	boolean_t 		spare_in_use;
	boolean_t 		free;		/* block is free for reposting */
	uint32_t 		ref_cnt;	/* outstanding references (loans) */
#ifdef RXBUFF_USE_SEPARATE_UP_CNTR
	uint32_t 		pass_up_cnt;
	boolean_t 		release;
#endif
	nxge_os_frtn_t 		freeb;		/* free callback for loaned mblks */
	size_t 			bytes_arrived;
	size_t 			bytes_expected;
	size_t 			block_size;	/* size of this buffer block */
	uint32_t		block_index;	/* index within the buffer area */
	uint32_t 		pkt_buf_size;	/* current packet buffer size */
	uint32_t 		pkt_buf_size_code; /* h/w size code for above */
	uint32_t 		max_pkt_bufs;	/* packet buffers per block */
	uint32_t		cur_usage_cnt;
	uint32_t		max_usage_cnt;
	uchar_t			*buffer;	/* kernel virtual address */
	uint32_t 		pri;
	uint32_t 		shifted_addr;
	boolean_t		use_buf_pool;
	p_mblk_t 		rx_mblk_p;
	boolean_t		rx_use_bcopy;	/* bcopy instead of loaning */
} rx_msg_t, *p_rx_msg_t;
196
/* Bundle of handles needed to access one RX DMA channel's resources. */
typedef struct _rx_dma_handle_t {
	nxge_os_dma_handle_t	dma_handle;	/* DMA handle	*/
	nxge_os_acc_handle_t	acc_handle;	/* DMA memory handle */
	npi_handle_t		npi_handle;	/* NPI register-access handle */
} rx_dma_handle_t, *p_rx_dma_handle_t;
202
203
/*
 * Receive Completion Ring: software state for one hardware RCR, the
 * ring of completion descriptors the hardware writes as packets arrive.
 */
typedef struct _rx_rcr_ring_t {
	nxge_os_dma_common_t	rcr_desc;	/* RCR descriptor DMA memory */

	struct _nxge_t		*nxgep;		/* back-pointer to instance */

	p_nxge_rx_ring_stats_t	rdc_stats;	/* per-channel statistics */

	boolean_t		poll_flag;	/* B_TRUE, if polling mode */

	rcrcfig_a_t		rcr_cfga;	/* cached h/w config registers */
	rcrcfig_b_t		rcr_cfgb;

	nxge_os_mutex_t 	lock;
	uint16_t		index;
	uint16_t		rdc;		/* RX DMA channel number */
	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
	uint32_t 		comp_size;	 /* # of RCR entries */
	uint64_t		rcr_addr;	 /* DMA address of the ring */
	uint_t 			comp_wrap_mask;
	uint_t 			comp_rd_index;	 /* software read index */
	uint_t 			comp_wt_index;	 /* software write index */

	p_rcr_entry_t		rcr_desc_first_p;
	p_rcr_entry_t		rcr_desc_first_pp;
	p_rcr_entry_t		rcr_desc_last_p;
	p_rcr_entry_t		rcr_desc_last_pp;

	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
	p_rcr_entry_t		rcr_desc_rd_head_pp;

	uint64_t		rcr_tail_pp;
	uint64_t		rcr_head_pp;
	struct _rx_rbr_ring_t	*rx_rbr_p;	/* paired buffer ring */
	uint32_t		intr_timeout;	/* RCR timeout setting */
	uint32_t		intr_threshold;	/* RCR packet threshold setting */
	uint64_t		max_receive_pkts;
	mac_ring_handle_t	rcr_mac_handle;	/* mac layer ring handle */
	uint64_t		rcr_gen_num;
	uint32_t		rcvd_pkt_bytes; /* Received bytes of a packet */
	p_nxge_ldv_t		ldvp;		/* logical device (interrupt) */
	p_nxge_ldg_t		ldgp;		/* logical device group */
	boolean_t		started;
} rx_rcr_ring_t, *p_rx_rcr_ring_t;
249
250
251
/*
 * Buffer index information: maps one allocated DMA buffer block to its
 * index, size, and both DVMA and kernel virtual addresses.
 */
typedef struct _rxbuf_index_info_t {
	uint32_t buf_index;	/* index of this buffer block */
	uint32_t start_index;
	uint32_t buf_size;	/* size of the block in bytes */
	uint64_t dvma_addr;	/* device (DMA) address */
	uint64_t kaddr;		/* kernel virtual address */
} rxbuf_index_info_t, *p_rxbuf_index_info_t;
260
261/*
262 * Buffer index information
263 */
264typedef struct _rxring_info_t {
265	uint32_t hint[RCR_N_PKTBUF_SZ];
266	uint32_t block_size_mask;
267	uint16_t max_iterations;
268	rxbuf_index_info_t buffer[NXGE_DMA_BLOCK];
269} rxring_info_t, *p_rxring_info_t;
270
271
/* Lifecycle state of a receive buffer ring (see rx_rbr_ring_t). */
typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;
277
278
/*
 * Receive Buffer Block Ring: software state for one hardware RBR, the
 * ring of buffer blocks posted to the hardware for incoming packets.
 */
typedef struct _rx_rbr_ring_t {
	nxge_os_dma_common_t	rbr_desc;	/* RBR descriptor DMA memory */
	p_rx_msg_t 		*rx_msg_ring;	/* one rx_msg_t per buffer block */
	p_nxge_dma_common_t 	*dma_bufp;	/* buffer block DMA state */
	rbr_cfig_a_t		rbr_cfga;	/* cached h/w config registers */
	rbr_cfig_b_t		rbr_cfgb;
	rbr_kick_t		rbr_kick;
	log_page_vld_t		page_valid;	/* logical page programming */
	log_page_mask_t		page_mask_1;
	log_page_mask_t		page_mask_2;
	log_page_value_t	page_value_1;
	log_page_value_t	page_value_2;
	log_page_relo_t		page_reloc_1;
	log_page_relo_t		page_reloc_2;
	log_page_hdl_t		page_hdl;

	boolean_t		cfg_set;	/* h/w config has been written */

	nxge_os_mutex_t		lock;
	nxge_os_mutex_t		post_lock;	/* serializes buffer posting */
	uint16_t		index;
	struct _nxge_t		*nxgep;		/* back-pointer to instance */
	uint16_t		rdc;		/* RX DMA channel number */
	uint16_t		rdc_grp_id;	/* RDC group this channel is in */
	uint_t 			rbr_max_size;
	uint64_t		rbr_addr;	/* DMA address of the ring */
	uint_t 			rbr_wrap_mask;
	uint_t 			rbb_max;	/* max buffer blocks */
	uint_t 			rbb_added;
	uint_t			block_size;	/* buffer block size in bytes */
	uint_t			num_blocks;
	uint_t			tnblocks;
	uint_t			pkt_buf_size0;		/* size class 0 (code) */
	uint_t			pkt_buf_size0_bytes;
	uint_t			npi_pkt_buf_size0;
	uint_t			pkt_buf_size1;		/* size class 1 (code) */
	uint_t			pkt_buf_size1_bytes;
	uint_t			npi_pkt_buf_size1;
	uint_t			pkt_buf_size2;		/* size class 2 (code) */
	uint_t			pkt_buf_size2_bytes;
	uint_t			npi_pkt_buf_size2;

	uint32_t		*rbr_desc_vp;	/* kernel view of descriptors */

	p_rx_rcr_ring_t		rx_rcr_p;	/* paired completion ring */

	uint_t 			rbr_wr_index;	/* software write index */
	uint_t 			rbr_rd_index;	/* software read index */

	rxring_info_t  *ring_info;	/* buffer lookup table */
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/* Hypervisor (logical-domain) I/O addresses for the NIU workaround. */
	uint64_t		hv_rx_buf_base_ioaddr_pp;
	uint64_t		hv_rx_buf_ioaddr_size;
	uint64_t		hv_rx_cntl_base_ioaddr_pp;
	uint64_t		hv_rx_cntl_ioaddr_size;
	boolean_t		hv_set;
#endif
	uint_t 			rbr_consumed;
	uint_t 			rbr_threshold_hi;
	uint_t 			rbr_threshold_lo;
	nxge_rxbuf_type_t	rbr_bufsize_type;
	boolean_t		rbr_use_bcopy;

	/*
	 * <rbr_ref_cnt> is a count of those receive buffers which
	 * have been loaned to the kernel.  We will not free this
	 * ring until the reference count reaches zero (0).
	 */
	uint32_t		rbr_ref_cnt;
	rbr_state_t		rbr_state; /* POSTING, etc */
	/*
	 * Receive buffer allocation types:
	 *   ddi_dma_mem_alloc(), contig_mem_alloc(), kmem_alloc()
	 */
	buf_alloc_type_t	rbr_alloc_type;
} rx_rbr_ring_t, *p_rx_rbr_ring_t;
356
/*
 * Receive Mailbox: per-channel DMA area the hardware updates with
 * channel state, plus the cached configuration registers.
 */
typedef struct _rx_mbox_t {
	nxge_os_dma_common_t	rx_mbox;	/* mailbox DMA memory */
	rxdma_cfig1_t		rx_cfg1;	/* cached h/w config registers */
	rxdma_cfig2_t		rx_cfg2;
	uint64_t		mbox_addr;	/* DMA address of the mailbox */
	boolean_t		cfg_set;	/* h/w config has been written */

	nxge_os_mutex_t 	lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;		/* back-pointer to instance */
	uint16_t		rdc;		/* RX DMA channel number */
} rx_mbox_t, *p_rx_mbox_t;
370
371
/* All receive buffer rings for an instance (one per RX DMA channel). */
typedef struct _rx_rbr_rings_t {
	p_rx_rbr_ring_t 	*rbr_rings;	/* array of ndmas ring pointers */
	uint32_t		ndmas;		/* number of RX DMA channels */
	boolean_t		rxbuf_allocated; /* receive buffers allocated */
} rx_rbr_rings_t, *p_rx_rbr_rings_t;

/* All receive completion rings for an instance. */
typedef struct _rx_rcr_rings_t {
	p_rx_rcr_ring_t 	*rcr_rings;	/* array of ndmas ring pointers */
	uint32_t		ndmas;		/* number of RX DMA channels */
	boolean_t		cntl_buf_allocated; /* control buffers allocated */
} rx_rcr_rings_t, *p_rx_rcr_rings_t;

/* All receive mailbox areas for an instance. */
typedef struct _rx_mbox_areas_t {
	p_rx_mbox_t 		*rxmbox_areas;	/* array of ndmas mailbox ptrs */
	uint32_t		ndmas;		/* number of RX DMA channels */
	boolean_t		mbox_allocated;	/* mailbox areas allocated */
} rx_mbox_areas_t, *p_rx_mbox_areas_t;
389
390/*
391 * Global register definitions per chip and they are initialized
392 * using the function zero control registers.
393 * .
394 */
395
396typedef struct _rxdma_globals {
397	boolean_t		mode32;
398	uint16_t		rxdma_ck_div_cnt;
399	uint16_t		rxdma_red_ran_init;
400	uint32_t		rxdma_eing_timeout;
401} rxdma_globals_t, *p_rxdma_globals;
402
403
404/*
405 * Receive DMA Prototypes.
406 */
407nxge_status_t nxge_init_rxdma_channels(p_nxge_t);
408void nxge_uninit_rxdma_channels(p_nxge_t);
409
410nxge_status_t nxge_init_rxdma_channel(p_nxge_t, int);
411void nxge_uninit_rxdma_channel(p_nxge_t, int);
412
413nxge_status_t nxge_init_rxdma_channel_rcrflush(p_nxge_t, uint8_t);
414nxge_status_t nxge_reset_rxdma_channel(p_nxge_t, uint16_t);
415nxge_status_t nxge_init_rxdma_channel_cntl_stat(p_nxge_t,
416	uint16_t, p_rx_dma_ctl_stat_t);
417nxge_status_t nxge_enable_rxdma_channel(p_nxge_t,
418	uint16_t, p_rx_rbr_ring_t, p_rx_rcr_ring_t,
419	p_rx_mbox_t);
420nxge_status_t nxge_init_rxdma_channel_event_mask(p_nxge_t,
421		uint16_t, p_rx_dma_ent_msk_t);
422
423nxge_status_t nxge_rxdma_hw_mode(p_nxge_t, boolean_t);
424void nxge_hw_start_rx(p_nxge_t);
425void nxge_fixup_rxdma_rings(p_nxge_t);
426nxge_status_t nxge_dump_rxdma_channel(p_nxge_t, uint8_t);
427
428void nxge_rxdma_fix_channel(p_nxge_t, uint16_t);
429
430mblk_t *nxge_rx_poll(void *, int);
431int nxge_enable_poll(void *);
432int nxge_disable_poll(void *);
433
434void nxge_rxdma_regs_dump_channels(p_nxge_t);
435nxge_status_t nxge_rxdma_handle_sys_errors(p_nxge_t);
436void nxge_rxdma_inject_err(p_nxge_t, uint32_t, uint8_t);
437
438extern nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
439extern nxge_status_t nxge_alloc_rxb(p_nxge_t nxgep, int channel);
440extern void nxge_free_rxb(p_nxge_t nxgep, int channel);
441
442int nxge_get_rxring_index(p_nxge_t, int, int);
443
444#ifdef	__cplusplus
445}
446#endif
447
448#endif	/* _SYS_NXGE_NXGE_RXDMA_H */
449