xref: /illumos-gate/usr/src/uts/common/io/mlxcx/mlxcx.h (revision ebb7c6fd4f966f94af3e235242b8a39b7a53664a)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2020, The University of Queensland
14  * Copyright (c) 2018, Joyent, Inc.
15  */
16 
17 /*
18  * Mellanox Connect-X 4/5/6 driver.
19  *
20  * More details in mlxcx.c
21  */
22 
23 #ifndef _MLXCX_H
24 #define	_MLXCX_H
25 
26 /*
27  * mlxcx(7D) defintions
28  */
29 
30 #include <sys/ddi.h>
31 #include <sys/sunddi.h>
32 #include <sys/ddifm.h>
33 #include <sys/id_space.h>
34 #include <sys/list.h>
35 #include <sys/stddef.h>
36 #include <sys/stream.h>
37 #include <sys/strsun.h>
38 #include <sys/mac_provider.h>
39 #include <sys/mac_ether.h>
40 #include <sys/cpuvar.h>
41 #include <sys/ethernet.h>
42 
43 #include <inet/ip.h>
44 #include <inet/ip6.h>
45 
46 #include <sys/ddifm.h>
47 #include <sys/fm/protocol.h>
48 #include <sys/fm/util.h>
49 #include <sys/fm/io/ddi.h>
50 
51 #include <mlxcx_reg.h>
52 
53 #ifdef __cplusplus
54 extern "C" {
55 #endif
56 
57 /*
58  * Get access to the first PCI BAR.
59  */
60 #define	MLXCX_REG_NUMBER		1
61 
62 /*
63  * The command queue is supposed to be a page, which is 4k.
64  */
65 #define	MLXCX_CMD_DMA_PAGE_SIZE		4096
66 
67 /*
68  * Queues can allocate in units of this much memory.
69  */
70 #define	MLXCX_QUEUE_DMA_PAGE_SIZE	4096
71 
72 /*
73  * We advertise two sizes of groups to MAC -- a certain number of "large"
74  * groups (including the default group, which is sized to at least ncpus)
75  * followed by a certain number of "small" groups.
76  *
77  * This allows us to have a larger amount of classification resources available
78  * for zones/VMs without resorting to software classification.
79  */
80 #define	MLXCX_RX_NGROUPS_LARGE_DFLT		2
81 #define	MLXCX_RX_NRINGS_PER_LARGE_GROUP_DFLT	16
82 #define	MLXCX_RX_NGROUPS_SMALL_DFLT		256
83 #define	MLXCX_RX_NRINGS_PER_SMALL_GROUP_DFLT	4
84 
85 #define	MLXCX_TX_NGROUPS_DFLT		1
86 #define	MLXCX_TX_NRINGS_PER_GROUP_DFLT	64
87 
88 /*
89  * Queues will be sized to (1 << *Q_SIZE_SHIFT) entries long.
90  */
91 #define	MLXCX_EQ_SIZE_SHIFT_DFLT	9
92 #define	MLXCX_CQ_SIZE_SHIFT_DFLT	10
93 
94 /*
95  * Default to making SQs bigger than RQs for 9k MTU, since most packets will
96  * spill over into more than one slot. RQ WQEs are always 1 slot.
97  */
98 #define	MLXCX_SQ_SIZE_SHIFT_DFLT	11
99 #define	MLXCX_RQ_SIZE_SHIFT_DFLT	10
100 
101 #define	MLXCX_CQ_HWM_GAP		16
102 #define	MLXCX_CQ_LWM_GAP		24
103 
104 #define	MLXCX_RQ_REFILL_STEP		64
105 
106 /*
107  * CQ event moderation
108  */
109 #define	MLXCX_CQEMOD_PERIOD_USEC_DFLT	50
110 #define	MLXCX_CQEMOD_COUNT_DFLT		\
111 	(8 * ((1 << MLXCX_CQ_SIZE_SHIFT_DFLT) / 10))
112 
113 /*
114  * EQ interrupt moderation
115  */
116 #define	MLXCX_INTRMOD_PERIOD_USEC_DFLT	10
117 
118 /* Size of root flow tables */
119 #define	MLXCX_FTBL_ROOT_SIZE_SHIFT_DFLT		12
120 
121 /* Size of 2nd level flow tables for VLAN filtering */
122 #define	MLXCX_FTBL_VLAN_SIZE_SHIFT_DFLT		4
123 
124 /*
125  * How big does an mblk have to be before we dma_bind() it instead of
126  * bcopying?
127  */
128 #define	MLXCX_TX_BIND_THRESHOLD_DFLT	2048
129 
130 /*
131  * How often to check the status of completion queues for overflow and
132  * other problems.
133  */
134 #define	MLXCX_WQ_CHECK_INTERVAL_SEC_DFLT		300
135 #define	MLXCX_CQ_CHECK_INTERVAL_SEC_DFLT		300
136 #define	MLXCX_EQ_CHECK_INTERVAL_SEC_DFLT		30
137 
138 #define	MLXCX_DOORBELL_TRIES_DFLT		3
139 extern uint_t mlxcx_doorbell_tries;
140 
141 #define	MLXCX_STUCK_INTR_COUNT_DFLT		128
142 extern uint_t mlxcx_stuck_intr_count;
143 
144 #define	MLXCX_BUF_BIND_MAX_ATTEMTPS		50
145 
146 #define	MLXCX_MTU_OFFSET	\
147 	(sizeof (struct ether_vlan_header) + ETHERFCSL)
148 
149 /*
150  * This is the current version of the command structure that the driver expects
151  * to be found in the ISS.
152  */
153 #define	MLXCX_CMD_REVISION	5
154 
155 #ifdef	DEBUG
156 #define	MLXCX_DMA_SYNC(dma, flag)	VERIFY0(ddi_dma_sync( \
157 					    (dma).mxdb_dma_handle, 0, 0, \
158 					    (flag)))
159 #else
160 #define	MLXCX_DMA_SYNC(dma, flag)	(void) ddi_dma_sync( \
161 					    (dma).mxdb_dma_handle, 0, 0, \
162 					    (flag))
163 #endif
164 
165 #define	MLXCX_FM_SERVICE_MLXCX	"mlxcx"
166 
167 /*
168  * This macro defines the expected value of the 'Interface Step Sequence ID'
169  * (issi) which represents the version of the start up and tear down sequence.
170  * We must check that hardware supports this and tell it which version we're
171  * using as well.
172  */
173 #define	MLXCX_CURRENT_ISSI	1
174 
175 /*
176  * This is the size of a page that the hardware expects from us when
177  * manipulating pages.
178  */
179 #define	MLXCX_HW_PAGE_SIZE	4096
180 
181 /*
182  * This is a special lkey value used to terminate a list of scatter pointers.
183  */
184 #define	MLXCX_NULL_LKEY		0x100
185 
186 /*
187  * Forwards
188  */
189 struct mlxcx;
190 typedef struct mlxcx mlxcx_t;
191 
/*
 * Flags tracking which setup steps have been performed on a
 * mlxcx_dma_buffer_t (and therefore which must be undone when freeing it).
 */
typedef enum {
	MLXCX_DMABUF_HDL_ALLOC		= 1 << 0,	/* DMA handle allocated */
	MLXCX_DMABUF_MEM_ALLOC		= 1 << 1,	/* backing memory allocated */
	MLXCX_DMABUF_BOUND		= 1 << 2,	/* handle bound to memory */
	MLXCX_DMABUF_FOREIGN		= 1 << 3,	/* memory not allocated by us */
							/* (e.g. a bound mblk -- see */
							/* mlxcx_dma_bind_mblk) */
} mlxcx_dma_buffer_flags_t;

/*
 * A single DMA memory region: its kernel virtual address and length, plus
 * the DDI handles needed to access, sync and unbind it.
 */
typedef struct mlxcx_dma_buffer {
	mlxcx_dma_buffer_flags_t	mxdb_flags;
	caddr_t				mxdb_va;	/* Buffer VA */
	size_t				mxdb_len;	/* Buffer logical len */
	ddi_acc_handle_t		mxdb_acc_handle;
	ddi_dma_handle_t		mxdb_dma_handle;
	uint_t				mxdb_ncookies;	/* cookies in the binding */
} mlxcx_dma_buffer_t;

/*
 * A page of memory that has been given to the hardware (tracked on the
 * mlx_pages AVL tree in struct mlxcx).
 */
typedef struct mlxcx_dev_page {
	list_node_t		mxdp_list;	/* list linkage */
	avl_node_t		mxdp_tree;	/* linkage for mlx_pages tree */
	uintptr_t		mxdp_pa;	/* page address given to hw */
	mlxcx_dma_buffer_t	mxdp_dma;	/* backing DMA memory */
} mlxcx_dev_page_t;
214 
215 /*
216  * Data structure to keep track of all information related to the command queue.
217  */
/* Overall state of the command queue. */
typedef enum {
	MLXCX_CMD_QUEUE_S_IDLE = 1,	/* no command outstanding */
	MLXCX_CMD_QUEUE_S_BUSY,		/* a command is in flight */
	MLXCX_CMD_QUEUE_S_BROKEN	/* queue unusable; stop submitting */
} mlxcx_cmd_queue_status_t;

typedef struct mlxcx_cmd_queue {
	kmutex_t		mcmd_lock;	/* protects queue state */
	kcondvar_t		mcmd_cv;	/* signalled on state change */
	mlxcx_dma_buffer_t	mcmd_dma;	/* backing memory for entries */
	mlxcx_cmd_ent_t		*mcmd_ent;	/* the command queue entries */

	uint8_t			mcmd_size_l2;	/* log2 number of entries */
	uint8_t			mcmd_stride_l2;	/* log2 stride between entries */

	mlxcx_cmd_queue_status_t	mcmd_status;

	ddi_taskq_t		*mcmd_taskq;	/* for async cmd processing */
	id_space_t		*mcmd_tokens;	/* allocator for command tokens */
} mlxcx_cmd_queue_t;

/*
 * A command mailbox: extra DMA memory for command input/output that does
 * not fit inside the command queue entry itself.
 *
 * NOTE(review): the struct tag "mlxcd_cmd_mbox" looks like a typo for
 * "mlxcx_cmd_mbox"; it is kept as-is because renaming the tag could break
 * any code referring to it by tag elsewhere.
 */
typedef struct mlxcd_cmd_mbox {
	list_node_t		mlbox_node;	/* linkage on mlcmd_mbox_in/out */
	mlxcx_dma_buffer_t	mlbox_dma;	/* backing DMA memory */
	mlxcx_cmd_mailbox_t	*mlbox_data;	/* VA of the mailbox contents */
} mlxcx_cmd_mbox_t;
244 
/* Lifecycle state flags for an event queue (EQ). */
typedef enum {
	MLXCX_EQ_ALLOC		= 1 << 0,	/* dma mem alloc'd, size set */
	MLXCX_EQ_CREATED	= 1 << 1,	/* CREATE_EQ sent to hw */
	MLXCX_EQ_DESTROYED	= 1 << 2,	/* DESTROY_EQ sent to hw */
	MLXCX_EQ_ARMED		= 1 << 3,	/* Armed through the UAR */
	MLXCX_EQ_POLLING	= 1 << 4,	/* Currently being polled */
} mlxcx_eventq_state_t;

/* One BlueFlame register within a UAR page. */
typedef struct mlxcx_bf {
	kmutex_t		mbf_mtx;	/* serialises use of this BF */
	uint_t			mbf_cnt;
	uint_t			mbf_even;
	uint_t			mbf_odd;
} mlxcx_bf_t;

/*
 * A User Access Region (UAR): a hardware page holding the EQ/CQ doorbell
 * registers and the BlueFlame buffers (see notes on mlx_uar in struct
 * mlxcx).
 */
typedef struct mlxcx_uar {
	boolean_t		mlu_allocated;	/* allocated from hw? */
	uint_t			mlu_num;	/* hw UAR number */
	uint_t			mlu_base;

	volatile uint_t		mlu_bfcnt;	/* round-robin BF selector */
	mlxcx_bf_t		mlu_bf[MLXCX_BF_PER_UAR];
} mlxcx_uar_t;

/* Protection Domain -- opaque to us, see notes on mlx_pd in struct mlxcx. */
typedef struct mlxcx_pd {
	boolean_t		mlpd_allocated;
	uint32_t		mlpd_num;	/* hw-assigned PD number */
} mlxcx_pd_t;

/* Transport Domain -- opaque to us, see notes on mlx_tdom in struct mlxcx. */
typedef struct mlxcx_tdom {
	boolean_t		mltd_allocated;
	uint32_t		mltd_num;	/* hw-assigned TD number */
} mlxcx_tdom_t;
278 
typedef enum {
	MLXCX_PORT_VPORT_PROMISC	= 1 << 0,	/* vport is in promisc mode */
} mlxcx_port_flags_t;

typedef struct mlxcx_flow_table mlxcx_flow_table_t;
typedef struct mlxcx_flow_group mlxcx_flow_group_t;

/* Software-maintained per-port statistics. */
typedef struct {
	uint64_t		mlps_rx_drops;	/* rx packets dropped */
} mlxcx_port_stats_t;

typedef enum {
	MLXCX_PORT_INIT		= 1 << 0	/* port struct initialised */
} mlxcx_port_init_t;
293 
/* Per-port state. */
typedef struct mlxcx_port {
	kmutex_t		mlp_mtx;	/* protects this struct */
	mlxcx_port_init_t	mlp_init;
	mlxcx_t			*mlp_mlx;	/* backpointer to the instance */
	/*
	 * The mlp_num we have here starts at zero (it's an index), but the
	 * numbering we have to use for register access starts at 1. We
	 * currently write mlp_num into the other_vport fields in mlxcx_cmd.c
	 * (where 0 is a magic number meaning "my vport") so if we ever add
	 * support for virtualisation features and deal with more than one
	 * vport, we will probably have to change this.
	 */
	uint_t			mlp_num;
	mlxcx_port_flags_t	mlp_flags;
	uint64_t		mlp_guid;
	uint8_t			mlp_mac_address[ETHERADDRL];

	uint_t			mlp_mtu;	/* current MTU */
	uint_t			mlp_max_mtu;	/* maximum MTU hw will accept */

	mlxcx_port_status_t	mlp_admin_status;
	mlxcx_port_status_t	mlp_oper_status;

	boolean_t		mlp_autoneg;
	mlxcx_eth_proto_t	mlp_max_proto;	/* protos hw supports */
	mlxcx_eth_proto_t	mlp_admin_proto;	/* protos we've enabled */
	mlxcx_eth_proto_t	mlp_oper_proto;	/* proto currently in use */

	mlxcx_eth_inline_mode_t	mlp_wqe_min_inline;

	/* Root flow tables */
	mlxcx_flow_table_t	*mlp_rx_flow;
	mlxcx_flow_table_t	*mlp_tx_flow;

	/* Flow groups within the root rx table. */
	mlxcx_flow_group_t	*mlp_promisc;
	mlxcx_flow_group_t	*mlp_bcast;
	mlxcx_flow_group_t	*mlp_umcast;

	/* Tree of dest-MAC flow entries (keyed via mlfe_dmac_entry). */
	avl_tree_t		mlp_dmac_fe;

	mlxcx_port_stats_t	mlp_stats;

	/* Most recent transceiver module status/error we saw. */
	mlxcx_module_status_t	mlp_last_modstate;
	mlxcx_module_error_type_t	mlp_last_moderr;
} mlxcx_port_t;
339 
/* Which kind of work queue completions an EQ services. */
typedef enum {
	MLXCX_EQ_TYPE_ANY,
	MLXCX_EQ_TYPE_RX,
	MLXCX_EQ_TYPE_TX
} mlxcx_eventq_type_t;

/*
 * An event queue (EQ). EQs are 1:1 with interrupt vectors (see mlx_eqs in
 * struct mlxcx).
 */
typedef struct mlxcx_event_queue {
	kmutex_t		mleq_mtx;	/* protects this struct */
	mlxcx_t			*mleq_mlx;	/* backpointer to the instance */
	mlxcx_eventq_state_t	mleq_state;
	mlxcx_eventq_type_t	mleq_type;

	mlxcx_dma_buffer_t	mleq_dma;	/* backing memory for entries */

	size_t			mleq_entshift;	/* log2 number of entries */
	size_t			mleq_nents;	/* 1 << mleq_entshift */
	mlxcx_eventq_ent_t	*mleq_ent;
	uint32_t		mleq_cc;	/* consumer counter */
	uint32_t		mleq_cc_armed;	/* cc at last arm */

	uint32_t		mleq_events;	/* total events processed */

	uint32_t		mleq_badintrs;	/* interrupts with no events */

	/* Hardware eq number */
	uint_t			mleq_num;
	/* Index into the mlxcx_t's interrupts array */
	uint_t			mleq_intr_index;

	/* UAR region that has this EQ's doorbell in it */
	mlxcx_uar_t		*mleq_uar;

	/* Tree of CQn => mlxcx_completion_queue_t */
	avl_tree_t		mleq_cqs;

	/* Used by the periodic check to detect a stalled (disarmed) EQ. */
	uint32_t		mleq_check_disarm_cc;
	uint_t			mleq_check_disarm_cnt;
} mlxcx_event_queue_t;
378 
typedef enum {
	MLXCX_TIS_CREATED		= 1 << 0,	/* CREATE_TIS sent to hw */
	MLXCX_TIS_DESTROYED		= 1 << 1,	/* DESTROY_TIS sent to hw */
} mlxcx_tis_state_t;

/* A TIS (Transport Interface Send) context, required by send queues. */
typedef struct mlxcx_tis {
	mlxcx_tis_state_t		mltis_state;
	list_node_t			mltis_entry;
	uint_t				mltis_num;	/* hw TIS number */
	mlxcx_tdom_t			*mltis_tdom;	/* owning transport domain */
} mlxcx_tis_t;

/* Where a mlxcx_buffer_t currently is in its lifecycle. */
typedef enum {
	MLXCX_BUFFER_INIT,	/* newly constructed */
	MLXCX_BUFFER_FREE,	/* on a shard's free list */
	MLXCX_BUFFER_ON_WQ,	/* posted to a work queue */
	MLXCX_BUFFER_ON_LOAN,	/* rx buffer loaned out (mblk not yet freed) */
	MLXCX_BUFFER_ON_CHAIN,	/* member of a tx chain (see mlb_tx_chain) */
} mlxcx_buffer_state_t;
398 
/* A shard of buffers: free/busy lists sharing one lock. */
typedef struct mlxcx_buf_shard {
	list_node_t		mlbs_entry;	/* linkage on mlx_buf_shards */
	kmutex_t		mlbs_mtx;	/* protects both lists */
	list_t			mlbs_busy;	/* buffers currently in use */
	list_t			mlbs_free;	/* buffers available to take */
	kcondvar_t		mlbs_free_nonempty;	/* signalled on free */
} mlxcx_buf_shard_t;

/* A single rx or tx data buffer. */
typedef struct mlxcx_buffer {
	mlxcx_buf_shard_t	*mlb_shard;	/* shard we belong to */
	list_node_t		mlb_entry;	/* shard list linkage */
	list_node_t		mlb_cq_entry;	/* CQ buffer list linkage */

	struct mlxcx_buffer	*mlb_tx_head;	/* head of tx chain */
	list_t			mlb_tx_chain;	/* chain members (head only) */
	list_node_t		mlb_tx_chain_entry;

	boolean_t		mlb_foreign;	/* dma-bound mblk, not our mem */
	size_t			mlb_used;	/* bytes of mlb_dma in use */
	mblk_t			*mlb_tx_mp;	/* mblk being transmitted */

	mlxcx_t			*mlb_mlx;	/* backpointer to the instance */
	mlxcx_buffer_state_t	mlb_state;
	uint_t			mlb_wqe_index;	/* WQ slot we're posted at */
	mlxcx_dma_buffer_t	mlb_dma;	/* the DMA memory itself */
	mblk_t			*mlb_mp;	/* mblk wrapping mlb_dma (rx) */
	frtn_t			mlb_frtn;	/* free routine for mlb_mp */
} mlxcx_buffer_t;
427 
/* Lifecycle and runtime state flags for a completion queue (CQ). */
typedef enum {
	MLXCX_CQ_ALLOC		= 1 << 0,	/* dma mem alloc'd, size set */
	MLXCX_CQ_CREATED	= 1 << 1,	/* CREATE_CQ sent to hw */
	MLXCX_CQ_DESTROYED	= 1 << 2,	/* DESTROY_CQ sent to hw */
	MLXCX_CQ_EQAVL		= 1 << 3,	/* on our EQ's mleq_cqs tree */
	MLXCX_CQ_BLOCKED_MAC	= 1 << 4,	/* tx flow-controlled to MAC */
	MLXCX_CQ_TEARDOWN	= 1 << 5,	/* being torn down */
	MLXCX_CQ_POLLING	= 1 << 6,	/* rx poll mode (no interrupts) */
	MLXCX_CQ_ARMED		= 1 << 7,	/* armed through the UAR */
} mlxcx_completionq_state_t;

typedef struct mlxcx_work_queue mlxcx_work_queue_t;
440 
/* A completion queue (CQ), paired 1:1 with a work queue. */
typedef struct mlxcx_completion_queue {
	kmutex_t			mlcq_mtx;	/* protects this struct */
	mlxcx_t				*mlcq_mlx;	/* backptr to instance */
	mlxcx_completionq_state_t	mlcq_state;

	mlxcx_port_stats_t		*mlcq_stats;	/* port stats to update */

	list_node_t			mlcq_entry;	/* mlx_cqs linkage */
	avl_node_t			mlcq_eq_entry;	/* mleq_cqs linkage */

	uint_t				mlcq_num;	/* hw CQ number */

	mlxcx_work_queue_t		*mlcq_wq;	/* WQ reporting to us */
	mlxcx_event_queue_t		*mlcq_eq;	/* EQ we report to */

	/* UAR region that has this CQ's UAR doorbell in it */
	mlxcx_uar_t			*mlcq_uar;

	mlxcx_dma_buffer_t		mlcq_dma;	/* backing mem for ents */

	size_t				mlcq_entshift;	/* log2 nr of entries */
	size_t				mlcq_nents;	/* 1 << mlcq_entshift */
	mlxcx_completionq_ent_t		*mlcq_ent;
	uint32_t			mlcq_cc;	/* consumer counter */
	uint32_t			mlcq_cc_armed;	/* cc at last arm */
	uint32_t			mlcq_ec;	/* event counter */
	uint32_t			mlcq_ec_armed;	/* ec at last arm */

	mlxcx_dma_buffer_t		mlcq_doorbell_dma;
	mlxcx_completionq_doorbell_t	*mlcq_doorbell;

	/* Buffers outstanding, with high/low water marks for flow control. */
	uint64_t			mlcq_bufcnt;
	size_t				mlcq_bufhwm;
	size_t				mlcq_buflwm;
	list_t				mlcq_buffers;
	/*
	 * Second buffer list with its own lock; presumably for returns made
	 * without holding mlcq_mtx -- TODO confirm against mlxcx_ring.c.
	 */
	kmutex_t			mlcq_bufbmtx;
	list_t				mlcq_buffers_b;

	/* Used by the periodic check to detect a stalled (disarmed) CQ. */
	uint_t				mlcq_check_disarm_cnt;
	uint64_t			mlcq_check_disarm_cc;

	/* CQ event moderation settings (see MLXCX_CQEMOD_*). */
	uint_t				mlcq_cqemod_period_usec;
	uint_t				mlcq_cqemod_count;

	mac_ring_handle_t		mlcq_mac_hdl;	/* MAC ring handle */
	uint64_t			mlcq_mac_gen;	/* MAC generation nr */

	boolean_t			mlcq_fm_repd_qstate;	/* FM already told */
} mlxcx_completion_queue_t;
490 
/* Lifecycle state flags for a work queue (WQ). */
typedef enum {
	MLXCX_WQ_ALLOC		= 1 << 0,	/* dma mem alloc'd, size set */
	MLXCX_WQ_CREATED	= 1 << 1,	/* CREATE_*Q sent to hw */
	MLXCX_WQ_STARTED	= 1 << 2,	/* transitioned to ready */
	MLXCX_WQ_DESTROYED	= 1 << 3,	/* DESTROY_*Q sent to hw */
	MLXCX_WQ_TEARDOWN	= 1 << 4,	/* being torn down */
	MLXCX_WQ_BUFFERS	= 1 << 5,	/* buffer shards assigned */
} mlxcx_workq_state_t;

/* Discriminates send (SQ) vs receive (RQ) work queues. */
typedef enum {
	MLXCX_WQ_TYPE_SENDQ = 1,
	MLXCX_WQ_TYPE_RECVQ
} mlxcx_workq_type_t;

typedef struct mlxcx_ring_group mlxcx_ring_group_t;
506 
/* A work queue (WQ): either a send queue (SQ) or a receive queue (RQ). */
struct mlxcx_work_queue {
	kmutex_t			mlwq_mtx;	/* protects this struct */
	mlxcx_t				*mlwq_mlx;	/* backptr to instance */
	mlxcx_workq_type_t		mlwq_type;	/* SQ or RQ */
	mlxcx_workq_state_t		mlwq_state;

	list_node_t			mlwq_entry;	/* mlx_wqs linkage */
	list_node_t			mlwq_group_entry;	/* group linkage */

	mlxcx_ring_group_t		*mlwq_group;	/* owning ring group */

	uint_t				mlwq_num;	/* hw WQ number */

	mlxcx_completion_queue_t	*mlwq_cq;	/* CQ we report to */
	mlxcx_pd_t			*mlwq_pd;	/* protection domain */

	/* Required for send queues */
	mlxcx_tis_t			*mlwq_tis;

	/* UAR region that has this WQ's blueflame buffers in it */
	mlxcx_uar_t			*mlwq_uar;

	mlxcx_dma_buffer_t		mlwq_dma;	/* backing mem for ents */

	mlxcx_eth_inline_mode_t		mlwq_inline_mode;
	size_t				mlwq_entshift;	/* log2 nr of entries */
	size_t				mlwq_nents;	/* 1 << mlwq_entshift */
	/* Discriminate based on mwq_type */
	union {
		mlxcx_sendq_ent_t	*mlwq_send_ent;
		mlxcx_sendq_extra_ent_t	*mlwq_send_extra_ent;
		mlxcx_recvq_ent_t	*mlwq_recv_ent;
		mlxcx_sendq_bf_t	*mlwq_bf_ent;
	};
	uint64_t			mlwq_pc;	/* producer counter */

	mlxcx_dma_buffer_t		mlwq_doorbell_dma;
	mlxcx_workq_doorbell_t		*mlwq_doorbell;

	/* Buffer shards: copied buffers vs dma-bound (foreign) mblks. */
	mlxcx_buf_shard_t		*mlwq_bufs;
	mlxcx_buf_shard_t		*mlwq_foreign_bufs;

	boolean_t			mlwq_fm_repd_qstate;	/* FM already told */
};
551 
#define	MLXCX_RQT_MAX_SIZE		64

typedef enum {
	MLXCX_RQT_CREATED		= 1 << 0,	/* CREATE_RQT sent to hw */
	MLXCX_RQT_DESTROYED		= 1 << 1,	/* DESTROY_RQT sent to hw */
	MLXCX_RQT_DIRTY			= 1 << 2,	/* needs re-writing to hw */
} mlxcx_rqtable_state_t;

/*
 * An RQ table (RQT): an indirection table of receive queues which a TIR
 * can spread hashed traffic across.
 */
typedef struct mlxcx_rqtable {
	mlxcx_rqtable_state_t		mlrqt_state;
	list_node_t			mlrqt_entry;
	uint_t				mlrqt_num;	/* hw RQT number */

	size_t				mlrqt_max;	/* capacity of mlrqt_rq */
	size_t				mlrqt_used;	/* entries in use */

	size_t				mlrqt_rq_size;	/* alloc size of mlrqt_rq */
	mlxcx_work_queue_t		**mlrqt_rq;	/* the member RQs */
} mlxcx_rqtable_t;
571 
typedef enum {
	MLXCX_TIR_CREATED		= 1 << 0,	/* CREATE_TIR sent to hw */
	MLXCX_TIR_DESTROYED		= 1 << 1,	/* DESTROY_TIR sent to hw */
} mlxcx_tir_state_t;

/*
 * A TIR (Transport Interface Receive) context: directs received traffic
 * either straight at one RQ or, via hashing, at an RQ table.
 */
typedef struct mlxcx_tir {
	mlxcx_tir_state_t		mltir_state;
	list_node_t			mltir_entry;
	uint_t				mltir_num;	/* hw TIR number */
	mlxcx_tdom_t			*mltir_tdom;	/* owning transport dom */
	mlxcx_tir_type_t		mltir_type;	/* selects union member */
	union {
		mlxcx_rqtable_t			*mltir_rqtable;
		mlxcx_work_queue_t		*mltir_rq;
	};
	/* RSS hash configuration (only meaningful for the rqtable case). */
	mlxcx_tir_hash_fn_t		mltir_hash_fn;
	uint8_t				mltir_toeplitz_key[40];
	mlxcx_tir_rx_hash_l3_type_t	mltir_l3_type;
	mlxcx_tir_rx_hash_l4_type_t	mltir_l4_type;
	mlxcx_tir_rx_hash_fields_t	mltir_hash_fields;
} mlxcx_tir_t;
593 
typedef enum {
	MLXCX_FLOW_GROUP_CREATED	= 1 << 0,	/* created in hw */
	MLXCX_FLOW_GROUP_BUSY		= 1 << 1,	/* operation in progress */
	MLXCX_FLOW_GROUP_DESTROYED	= 1 << 2,	/* destroyed in hw */
} mlxcx_flow_group_state_t;

/* Which fields a flow group's entries match on. */
typedef enum {
	MLXCX_FLOW_MATCH_SMAC		= 1 << 0,	/* source MAC */
	MLXCX_FLOW_MATCH_DMAC		= 1 << 1,	/* dest MAC */
	MLXCX_FLOW_MATCH_VLAN		= 1 << 2,	/* VLAN presence/type */
	MLXCX_FLOW_MATCH_VID		= 1 << 3,	/* VLAN ID */
	MLXCX_FLOW_MATCH_IP_VER		= 1 << 4,	/* IP version */
	MLXCX_FLOW_MATCH_SRCIP		= 1 << 5,	/* source IP */
	MLXCX_FLOW_MATCH_DSTIP		= 1 << 6,	/* dest IP */
	MLXCX_FLOW_MATCH_IP_PROTO	= 1 << 7,	/* IP protocol */
	MLXCX_FLOW_MATCH_SQN		= 1 << 8,	/* send queue number */
	MLXCX_FLOW_MATCH_VXLAN		= 1 << 9,	/* VXLAN VNI */
} mlxcx_flow_mask_t;
612 
/*
 * A flow group: a contiguous range of entries within a flow table that
 * all match on the same set of fields (mlfg_mask).
 */
struct mlxcx_flow_group {
	list_node_t			mlfg_entry;	/* mlft_groups linkage */
	list_node_t			mlfg_role_entry;
	mlxcx_flow_group_state_t	mlfg_state;
	mlxcx_flow_table_t		*mlfg_table;	/* table we live in */
	uint_t				mlfg_num;	/* hw group number */
	size_t				mlfg_start_idx;	/* first entry index */
	size_t				mlfg_size;	/* entries reserved */
	size_t				mlfg_avail;	/* entries still free */
	list_t				mlfg_entries;	/* entries in use */
	mlxcx_flow_mask_t		mlfg_mask;	/* fields matched on */
};

typedef enum {
	MLXCX_FLOW_ENTRY_RESERVED	= 1 << 0,	/* claimed by a group */
	MLXCX_FLOW_ENTRY_CREATED	= 1 << 1,	/* written to hw */
	MLXCX_FLOW_ENTRY_DELETED	= 1 << 2,	/* removed from hw */
	MLXCX_FLOW_ENTRY_DIRTY		= 1 << 3,	/* needs re-writing to hw */
} mlxcx_flow_entry_state_t;

/* A single destination for a flow entry: either a TIR or another table. */
typedef struct {
	mlxcx_tir_t			*mlfed_tir;
	mlxcx_flow_table_t		*mlfed_flow;
} mlxcx_flow_entry_dest_t;
637 
/* One rule in a flow table: match criteria plus destinations. */
typedef struct mlxcx_flow_entry {
	list_node_t			mlfe_group_entry;	/* group linkage */
	avl_node_t			mlfe_dmac_entry;	/* mlp_dmac_fe linkage */
	mlxcx_flow_entry_state_t	mlfe_state;
	mlxcx_flow_table_t		*mlfe_table;	/* table we live in */
	mlxcx_flow_group_t		*mlfe_group;	/* group we belong to */
	uint_t				mlfe_index;	/* index within table */

	mlxcx_flow_action_t		mlfe_action;

	/* Criteria for match (which are used is set by the group's mask) */
	uint8_t				mlfe_smac[ETHERADDRL];
	uint8_t				mlfe_dmac[ETHERADDRL];

	mlxcx_vlan_type_t		mlfe_vlan_type;
	uint16_t			mlfe_vid;

	uint_t				mlfe_ip_version;
	uint8_t				mlfe_srcip[IPV6_ADDR_LEN];
	uint8_t				mlfe_dstip[IPV6_ADDR_LEN];

	uint_t				mlfe_ip_proto;
	uint16_t			mlfe_sport;
	uint16_t			mlfe_dport;

	uint32_t			mlfe_sqn;
	uint32_t			mlfe_vxlan_vni;

	/* Destinations */
	size_t				mlfe_ndest;
	mlxcx_flow_entry_dest_t		mlfe_dest[MLXCX_FLOW_MAX_DESTINATIONS];

	/*
	 * mlxcx_group_mac_ts joining this entry to N ring groups
	 * only used by FEs on the root rx flow table
	 */
	list_t				mlfe_ring_groups;
} mlxcx_flow_entry_t;
676 
typedef enum {
	MLXCX_FLOW_TABLE_CREATED	= 1 << 0,	/* created in hw */
	MLXCX_FLOW_TABLE_DESTROYED	= 1 << 1,	/* destroyed in hw */
	MLXCX_FLOW_TABLE_ROOT		= 1 << 2	/* is a root table */
} mlxcx_flow_table_state_t;

/* A hardware flow table: a fixed-size array of entries, carved into groups. */
struct mlxcx_flow_table {
	kmutex_t			mlft_mtx;	/* protects this struct */
	mlxcx_flow_table_state_t	mlft_state;
	uint_t				mlft_level;	/* level in the hierarchy */
	uint_t				mlft_num;	/* hw table number */
	mlxcx_flow_table_type_t		mlft_type;

	mlxcx_port_t			*mlft_port;	/* port we belong to */

	size_t				mlft_entshift;	/* log2 nr of entries */
	size_t				mlft_nents;	/* 1 << mlft_entshift */

	size_t				mlft_entsize;	/* alloc size of mlft_ent */
	mlxcx_flow_entry_t		*mlft_ent;	/* the entries */

	/* First entry not yet claimed by a group */
	size_t				mlft_next_ent;

	list_t				mlft_groups;	/* groups in this table */
};
703 
/* Direction of a ring group. */
typedef enum {
	MLXCX_GROUP_RX,
	MLXCX_GROUP_TX
} mlxcx_group_type_t;

/* Setup-progress flags for a ring group (used to drive teardown). */
typedef enum {
	MLXCX_GROUP_INIT		= 1 << 0,	/* basic state set up */
	MLXCX_GROUP_WQS			= 1 << 1,	/* WQs allocated */
	MLXCX_GROUP_TIRTIS		= 1 << 2,	/* TIRs/TIS created */
	MLXCX_GROUP_FLOWS		= 1 << 3,	/* flow tables created */
	MLXCX_GROUP_RUNNING		= 1 << 4,	/* started */
	MLXCX_GROUP_RQT			= 1 << 5,	/* RQ table created */
} mlxcx_group_state_t;

#define	MLXCX_RX_HASH_FT_SIZE_SHIFT	4

/*
 * One TIR per traffic class per rx group: traffic is split by protocol
 * (see mlg_rx_hash_ft) and each class is hashed separately.
 */
typedef enum {
	MLXCX_TIR_ROLE_IPv4 = 0,
	MLXCX_TIR_ROLE_IPv6,
	MLXCX_TIR_ROLE_TCPv4,
	MLXCX_TIR_ROLE_TCPv6,
	MLXCX_TIR_ROLE_UDPv4,
	MLXCX_TIR_ROLE_UDPv6,
	MLXCX_TIR_ROLE_OTHER,

	MLXCX_TIRS_PER_GROUP	/* count; not a real role */
} mlxcx_tir_role_t;
731 
/*
 * Joins a MAC address flow entry on the root rx table to a ring group.
 */
typedef struct {
	avl_node_t		mlgm_group_entry;	/* mlg_rx_macs linkage */
	list_node_t		mlgm_fe_entry;	/* mlfe_ring_groups linkage */
	mlxcx_ring_group_t	*mlgm_group;
	uint8_t			mlgm_mac[6];	/* 6 == ETHERADDRL */
	mlxcx_flow_entry_t	*mlgm_fe;
} mlxcx_group_mac_t;

/* A VLAN configured on a ring group's VLAN-filtering flow table. */
typedef struct {
	list_node_t		mlgv_entry;	/* mlg_rx_vlans linkage */
	boolean_t		mlgv_tagged;	/* B_FALSE == untagged traffic */
	uint16_t		mlgv_vid;	/* VLAN ID */
	mlxcx_flow_entry_t	*mlgv_fe;	/* entry implementing the filter */
} mlxcx_group_vlan_t;
746 
/* A ring group as advertised to MAC: a set of WQs plus classification. */
struct mlxcx_ring_group {
	kmutex_t			mlg_mtx;	/* protects this struct */
	mlxcx_t				*mlg_mlx;	/* backptr to instance */
	mlxcx_group_state_t		mlg_state;
	mlxcx_group_type_t		mlg_type;	/* rx or tx */

	mac_group_handle_t		mlg_mac_hdl;

	/* Discriminated by mlg_type: tx groups use a TIS, rx groups TIRs. */
	union {
		mlxcx_tis_t		mlg_tis;
		mlxcx_tir_t		mlg_tir[MLXCX_TIRS_PER_GROUP];
	};
	mlxcx_port_t			*mlg_port;

	size_t				mlg_nwqs;	/* number of WQs */
	size_t				mlg_wqs_size;	/* allocation size */
	mlxcx_work_queue_t		*mlg_wqs;	/* the WQs themselves */

	mlxcx_rqtable_t			*mlg_rqt;	/* RQ table (rx only) */

	/*
	 * Flow table for matching VLAN IDs
	 */
	mlxcx_flow_table_t		*mlg_rx_vlan_ft;
	mlxcx_flow_group_t		*mlg_rx_vlan_fg;
	mlxcx_flow_group_t		*mlg_rx_vlan_def_fg;
	mlxcx_flow_group_t		*mlg_rx_vlan_promisc_fg;
	list_t				mlg_rx_vlans;

	/*
	 * Flow table for separating out by protocol before hashing
	 */
	mlxcx_flow_table_t		*mlg_rx_hash_ft;

	/*
	 * Links to flow entries on the root flow table which are pointing to
	 * our rx_vlan_ft.
	 */
	avl_tree_t			mlg_rx_macs;
};
787 
/* Completion state of a command (flags; zero while still in flight). */
typedef enum mlxcx_cmd_state {
	MLXCX_CMD_S_DONE	= 1 << 0,	/* command completed */
	MLXCX_CMD_S_ERROR	= 1 << 1	/* ...with an error */
} mlxcx_cmd_state_t;

/* Tracks a single command submitted to the hardware command queue. */
typedef struct mlxcx_cmd {
	struct mlxcx		*mlcmd_mlxp;	/* owning instance */
	kmutex_t		mlcmd_lock;	/* protects state + cv */
	kcondvar_t		mlcmd_cv;	/* signalled on completion */

	uint8_t			mlcmd_token;	/* matches reply to request */
	mlxcx_cmd_op_t		mlcmd_op;	/* command opcode */

	/*
	 * Command data and extended mailboxes for responses.
	 */
	const void		*mlcmd_in;
	uint32_t		mlcmd_inlen;
	void			*mlcmd_out;
	uint32_t		mlcmd_outlen;
	list_t			mlcmd_mbox_in;	/* input mailbox chain */
	uint8_t			mlcmd_nboxes_in;
	list_t			mlcmd_mbox_out;	/* output mailbox chain */
	uint8_t			mlcmd_nboxes_out;
	/*
	 * Status information.
	 */
	mlxcx_cmd_state_t	mlcmd_state;	/* done/error flags */
	uint8_t			mlcmd_status;	/* hw status code */
} mlxcx_cmd_t;
818 
819 /*
820  * Our view of capabilities.
821  */
/* One raw HCA capability block as returned by QUERY_HCA_CAP. */
typedef struct mlxcx_hca_cap {
	mlxcx_hca_cap_mode_t	mhc_mode;	/* current vs maximum */
	mlxcx_hca_cap_type_t	mhc_type;	/* which cap group */
	/* Interpretation of the union is determined by mhc_type. */
	union {
		uint8_t				mhc_bulk[MLXCX_HCA_CAP_SIZE];
		mlxcx_hca_cap_general_caps_t	mhc_general;
		mlxcx_hca_cap_eth_caps_t	mhc_eth;
		mlxcx_hca_cap_flow_caps_t	mhc_flow;
	};
} mlxcx_hca_cap_t;

typedef struct {
	/* Cooked values */
	boolean_t		mlc_checksum;	/* hw checksum offload */
	boolean_t		mlc_lso;	/* large send offload */
	boolean_t		mlc_vxlan;	/* VXLAN support */
	size_t			mlc_max_lso_size;
	size_t			mlc_max_rqt_size;	/* max RQs per RQT */

	size_t			mlc_max_rx_ft_shift;	/* log2 max ft size */
	size_t			mlc_max_rx_fe_dest;	/* max dests per entry */
	size_t			mlc_max_rx_flows;

	size_t			mlc_max_tir;

	/* Raw caps data */
	mlxcx_hca_cap_t		mlc_hca_cur;
	mlxcx_hca_cap_t		mlc_hca_max;
	mlxcx_hca_cap_t		mlc_ether_cur;
	mlxcx_hca_cap_t		mlc_ether_max;
	mlxcx_hca_cap_t		mlc_nic_flow_cur;
	mlxcx_hca_cap_t		mlc_nic_flow_max;
} mlxcx_caps_t;
855 
/*
 * Driver tunables, one per MLXCX_*_DFLT macro above; see those macros for
 * the meaning and default of each field.
 */
typedef struct {
	uint_t			mldp_eq_size_shift;
	uint_t			mldp_cq_size_shift;
	uint_t			mldp_rq_size_shift;
	uint_t			mldp_sq_size_shift;
	uint_t			mldp_cqemod_period_usec;
	uint_t			mldp_cqemod_count;
	uint_t			mldp_intrmod_period_usec;
	uint_t			mldp_rx_ngroups_large;
	uint_t			mldp_rx_ngroups_small;
	uint_t			mldp_rx_nrings_per_large_group;
	uint_t			mldp_rx_nrings_per_small_group;
	uint_t			mldp_tx_ngroups;
	uint_t			mldp_tx_nrings_per_group;
	uint_t			mldp_ftbl_root_size_shift;
	size_t			mldp_tx_bind_threshold;
	uint_t			mldp_ftbl_vlan_size_shift;
	uint64_t		mldp_eq_check_interval_sec;
	uint64_t		mldp_cq_check_interval_sec;
	uint64_t		mldp_wq_check_interval_sec;
} mlxcx_drv_props_t;
877 
/*
 * Attach progress flags: records which attach steps have completed so
 * that detach/cleanup can undo exactly what was set up.
 */
typedef enum {
	MLXCX_ATTACH_FM		= 1 << 0,
	MLXCX_ATTACH_PCI_CONFIG	= 1 << 1,
	MLXCX_ATTACH_REGS	= 1 << 2,
	MLXCX_ATTACH_CMD	= 1 << 3,
	MLXCX_ATTACH_ENABLE_HCA	= 1 << 4,
	MLXCX_ATTACH_PAGE_LIST	= 1 << 5,
	MLXCX_ATTACH_INIT_HCA	= 1 << 6,
	MLXCX_ATTACH_UAR_PD_TD	= 1 << 7,
	MLXCX_ATTACH_INTRS	= 1 << 8,
	MLXCX_ATTACH_PORTS	= 1 << 9,
	MLXCX_ATTACH_MAC_HDL	= 1 << 10,
	MLXCX_ATTACH_CQS	= 1 << 11,
	MLXCX_ATTACH_WQS	= 1 << 12,
	MLXCX_ATTACH_GROUPS	= 1 << 13,
	MLXCX_ATTACH_BUFS	= 1 << 14,
	MLXCX_ATTACH_CAPS	= 1 << 15,
	MLXCX_ATTACH_CHKTIMERS	= 1 << 16,
} mlxcx_attach_progress_t;
897 
/* Per-instance soft state for the driver. */
struct mlxcx {
	/* entry on the mlxcx_glist */
	list_node_t		mlx_gentry;

	dev_info_t		*mlx_dip;
	int			mlx_inst;	/* ddi instance number */
	mlxcx_attach_progress_t	mlx_attach;	/* which attach steps are done */

	mlxcx_drv_props_t	mlx_props;	/* tunables */

	/*
	 * Misc. data
	 */
	uint16_t		mlx_fw_maj;	/* firmware version */
	uint16_t		mlx_fw_min;
	uint16_t		mlx_fw_rev;
	uint16_t		mlx_cmd_rev;	/* cmd interface revision */

	/*
	 * Various capabilities of hardware.
	 */
	mlxcx_caps_t		*mlx_caps;

	uint_t			mlx_max_sdu;
	uint_t			mlx_sdu;

	/*
	 * FM State
	 */
	int			mlx_fm_caps;

	/*
	 * PCI Data
	 */
	ddi_acc_handle_t	mlx_cfg_handle;
	ddi_acc_handle_t	mlx_regs_handle;
	caddr_t			mlx_regs_base;

	/*
	 * MAC handle
	 */
	mac_handle_t		mlx_mac_hdl;

	/*
	 * Main command queue for issuing general FW control commands.
	 */
	mlxcx_cmd_queue_t	mlx_cmd;

	/*
	 * Interrupts
	 */
	uint_t			mlx_intr_pri;
	uint_t			mlx_intr_type;		/* always MSI-X */
	int			mlx_intr_count;
	size_t			mlx_intr_size;		/* allocation size */
	ddi_intr_handle_t	*mlx_intr_handles;

	/*
	 * Basic firmware resources which we use for a variety of things.
	 * The UAR is a reference to a page where CQ and EQ doorbells are
	 * located. It also holds all the BlueFlame stuff (which we don't
	 * use).
	 */
	mlxcx_uar_t		mlx_uar;
	/*
	 * The PD (Protection Domain) and TDOM (Transport Domain) are opaque
	 * entities to us (they're Infiniband constructs we don't actually care
	 * about) -- we just allocate them and shove their ID numbers in
	 * whenever we're asked for one.
	 *
	 * The "reserved" LKEY is what we should put in queue entries that
	 * have references to memory to indicate that they're using linear
	 * addresses (comes from the QUERY_SPECIAL_CONTEXTS cmd).
	 */
	mlxcx_pd_t		mlx_pd;
	mlxcx_tdom_t		mlx_tdom;
	uint_t			mlx_rsvd_lkey;

	/*
	 * Our event queues. These are 1:1 with interrupts.
	 */
	size_t			mlx_eqs_size;		/* allocation size */
	mlxcx_event_queue_t	*mlx_eqs;

	/*
	 * Page list. These represent the set of 4k pages we've given to
	 * hardware.
	 *
	 * We can add to this list at the request of hardware from interrupt
	 * context (the PAGE_REQUEST event), so it's protected by pagemtx.
	 */
	kmutex_t		mlx_pagemtx;
	uint_t			mlx_npages;
	avl_tree_t		mlx_pages;

	/*
	 * Port state
	 */
	uint_t			mlx_nports;
	size_t			mlx_ports_size;		/* allocation size */
	mlxcx_port_t		*mlx_ports;

	/*
	 * Completion queues (CQs). These are also indexed off the
	 * event_queue_ts that they each report to.
	 */
	list_t			mlx_cqs;

	/* Next EQ to assign a new CQ to (round-robin). */
	uint_t			mlx_next_eq;

	/*
	 * Work queues (WQs).
	 */
	list_t			mlx_wqs;

	/*
	 * Ring groups
	 */
	size_t			mlx_rx_ngroups;
	size_t			mlx_rx_groups_size;	/* allocation size */
	mlxcx_ring_group_t	*mlx_rx_groups;

	size_t			mlx_tx_ngroups;
	size_t			mlx_tx_groups_size;	/* allocation size */
	mlxcx_ring_group_t	*mlx_tx_groups;

	kmem_cache_t		*mlx_bufs_cache;	/* for mlxcx_buffer_ts */
	list_t			mlx_buf_shards;

	/* Periodic sanity checks on EQs/CQs/WQs (see *_CHECK_INTERVAL). */
	ddi_periodic_t		mlx_eq_checktimer;
	ddi_periodic_t		mlx_cq_checktimer;
	ddi_periodic_t		mlx_wq_checktimer;
};
1031 
1032 /*
1033  * Register access
1034  */
1035 extern uint16_t mlxcx_get16(mlxcx_t *, uintptr_t);
1036 extern uint32_t mlxcx_get32(mlxcx_t *, uintptr_t);
1037 extern uint64_t mlxcx_get64(mlxcx_t *, uintptr_t);
1038 
1039 extern void mlxcx_put32(mlxcx_t *, uintptr_t, uint32_t);
1040 extern void mlxcx_put64(mlxcx_t *, uintptr_t, uint64_t);
1041 
1042 extern void mlxcx_uar_put32(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint32_t);
1043 extern void mlxcx_uar_put64(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint64_t);
1044 
1045 /*
1046  * Logging functions.
1047  */
1048 extern void mlxcx_warn(mlxcx_t *, const char *, ...);
1049 extern void mlxcx_note(mlxcx_t *, const char *, ...);
1050 extern void mlxcx_panic(mlxcx_t *, const char *, ...);
1051 
1052 extern void mlxcx_fm_ereport(mlxcx_t *, const char *);
1053 
1054 extern void mlxcx_check_sq(mlxcx_t *, mlxcx_work_queue_t *);
1055 extern void mlxcx_check_rq(mlxcx_t *, mlxcx_work_queue_t *);
1056 
1057 /*
1058  * DMA Functions
1059  */
1060 extern void mlxcx_dma_free(mlxcx_dma_buffer_t *);
1061 extern boolean_t mlxcx_dma_alloc(mlxcx_t *, mlxcx_dma_buffer_t *,
1062     ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
1063 extern boolean_t mlxcx_dma_init(mlxcx_t *, mlxcx_dma_buffer_t *,
1064     ddi_dma_attr_t *, boolean_t);
1065 extern boolean_t mlxcx_dma_bind_mblk(mlxcx_t *, mlxcx_dma_buffer_t *,
1066     const mblk_t *, size_t, boolean_t);
1067 extern boolean_t mlxcx_dma_alloc_offset(mlxcx_t *, mlxcx_dma_buffer_t *,
1068     ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t,
1069     size_t, size_t, boolean_t);
1070 extern void mlxcx_dma_unbind(mlxcx_t *, mlxcx_dma_buffer_t *);
1071 extern void mlxcx_dma_acc_attr(mlxcx_t *, ddi_device_acc_attr_t *);
1072 extern void mlxcx_dma_page_attr(mlxcx_t *, ddi_dma_attr_t *);
1073 extern void mlxcx_dma_queue_attr(mlxcx_t *, ddi_dma_attr_t *);
1074 extern void mlxcx_dma_qdbell_attr(mlxcx_t *, ddi_dma_attr_t *);
1075 extern void mlxcx_dma_buf_attr(mlxcx_t *, ddi_dma_attr_t *);
1076 
1077 extern boolean_t mlxcx_give_pages(mlxcx_t *, int32_t);
1078 
1079 static inline const ddi_dma_cookie_t *
1080 mlxcx_dma_cookie_iter(const mlxcx_dma_buffer_t *db,
1081     const ddi_dma_cookie_t *prev)
1082 {
1083 	ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
1084 	return (ddi_dma_cookie_iter(db->mxdb_dma_handle, prev));
1085 }
1086 
1087 static inline const ddi_dma_cookie_t *
1088 mlxcx_dma_cookie_one(const mlxcx_dma_buffer_t *db)
1089 {
1090 	ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
1091 	return (ddi_dma_cookie_one(db->mxdb_dma_handle));
1092 }
1093 
1094 /*
1095  * From mlxcx_intr.c
1096  */
1097 extern boolean_t mlxcx_intr_setup(mlxcx_t *);
1098 extern void mlxcx_intr_teardown(mlxcx_t *);
1099 extern void mlxcx_arm_eq(mlxcx_t *, mlxcx_event_queue_t *);
1100 extern void mlxcx_arm_cq(mlxcx_t *, mlxcx_completion_queue_t *);
1101 
1102 extern mblk_t *mlxcx_rx_poll(mlxcx_t *, mlxcx_completion_queue_t *, size_t);
1103 
1104 /*
1105  * From mlxcx_gld.c
1106  */
1107 extern boolean_t mlxcx_register_mac(mlxcx_t *);
1108 
1109 /*
1110  * From mlxcx_ring.c
1111  */
1112 extern boolean_t mlxcx_cq_alloc_dma(mlxcx_t *, mlxcx_completion_queue_t *);
1113 extern void mlxcx_cq_rele_dma(mlxcx_t *, mlxcx_completion_queue_t *);
1114 extern boolean_t mlxcx_wq_alloc_dma(mlxcx_t *, mlxcx_work_queue_t *);
1115 extern void mlxcx_wq_rele_dma(mlxcx_t *, mlxcx_work_queue_t *);
1116 
1117 extern boolean_t mlxcx_buf_create(mlxcx_t *, mlxcx_buf_shard_t *,
1118     mlxcx_buffer_t **);
1119 extern boolean_t mlxcx_buf_create_foreign(mlxcx_t *, mlxcx_buf_shard_t *,
1120     mlxcx_buffer_t **);
1121 extern void mlxcx_buf_take(mlxcx_t *, mlxcx_work_queue_t *, mlxcx_buffer_t **);
1122 extern size_t mlxcx_buf_take_n(mlxcx_t *, mlxcx_work_queue_t *,
1123     mlxcx_buffer_t **, size_t);
1124 extern boolean_t mlxcx_buf_loan(mlxcx_t *, mlxcx_buffer_t *);
1125 extern void mlxcx_buf_return(mlxcx_t *, mlxcx_buffer_t *);
1126 extern void mlxcx_buf_return_chain(mlxcx_t *, mlxcx_buffer_t *, boolean_t);
1127 extern void mlxcx_buf_destroy(mlxcx_t *, mlxcx_buffer_t *);
1128 
1129 extern boolean_t mlxcx_buf_bind_or_copy(mlxcx_t *, mlxcx_work_queue_t *,
1130     mblk_t *, size_t, mlxcx_buffer_t **);
1131 
1132 extern boolean_t mlxcx_rx_group_setup(mlxcx_t *, mlxcx_ring_group_t *);
1133 extern boolean_t mlxcx_tx_group_setup(mlxcx_t *, mlxcx_ring_group_t *);
1134 
1135 extern boolean_t mlxcx_rx_group_start(mlxcx_t *, mlxcx_ring_group_t *);
1136 extern boolean_t mlxcx_tx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
1137     mlxcx_work_queue_t *);
1138 extern boolean_t mlxcx_rx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
1139     mlxcx_work_queue_t *);
1140 
1141 extern boolean_t mlxcx_rq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
1142     mlxcx_buffer_t *);
1143 extern boolean_t mlxcx_rq_add_buffers(mlxcx_t *, mlxcx_work_queue_t *,
1144     mlxcx_buffer_t **, size_t);
1145 extern boolean_t mlxcx_sq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
1146     uint8_t *, size_t, uint32_t, mlxcx_buffer_t *);
1147 extern boolean_t mlxcx_sq_add_nop(mlxcx_t *, mlxcx_work_queue_t *);
1148 extern void mlxcx_rq_refill(mlxcx_t *, mlxcx_work_queue_t *);
1149 
1150 extern void mlxcx_teardown_groups(mlxcx_t *);
1151 extern void mlxcx_wq_teardown(mlxcx_t *, mlxcx_work_queue_t *);
1152 extern void mlxcx_cq_teardown(mlxcx_t *, mlxcx_completion_queue_t *);
1153 extern void mlxcx_teardown_rx_group(mlxcx_t *, mlxcx_ring_group_t *);
1154 extern void mlxcx_teardown_tx_group(mlxcx_t *, mlxcx_ring_group_t *);
1155 
1156 extern void mlxcx_tx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
1157     mlxcx_completionq_ent_t *, mlxcx_buffer_t *);
1158 extern mblk_t *mlxcx_rx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
1159     mlxcx_completionq_ent_t *, mlxcx_buffer_t *);
1160 
1161 extern mlxcx_buf_shard_t *mlxcx_mlbs_create(mlxcx_t *);
1162 
1163 /*
1164  * Flow mgmt
1165  */
1166 extern boolean_t mlxcx_add_umcast_entry(mlxcx_t *, mlxcx_port_t *,
1167     mlxcx_ring_group_t *, const uint8_t *);
1168 extern boolean_t mlxcx_remove_umcast_entry(mlxcx_t *, mlxcx_port_t *,
1169     mlxcx_ring_group_t *, const uint8_t *);
1170 extern void mlxcx_remove_all_umcast_entries(mlxcx_t *, mlxcx_port_t *,
1171     mlxcx_ring_group_t *);
1172 extern boolean_t mlxcx_setup_flow_group(mlxcx_t *, mlxcx_flow_table_t *,
1173     mlxcx_flow_group_t *);
1174 extern void mlxcx_teardown_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
1175 
1176 extern void mlxcx_remove_all_vlan_entries(mlxcx_t *, mlxcx_ring_group_t *);
1177 extern boolean_t mlxcx_remove_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
1178     boolean_t, uint16_t);
1179 extern boolean_t mlxcx_add_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
1180     boolean_t, uint16_t);
1181 
1182 /*
1183  * Command functions
1184  */
1185 extern boolean_t mlxcx_cmd_queue_init(mlxcx_t *);
1186 extern void mlxcx_cmd_queue_fini(mlxcx_t *);
1187 
1188 extern boolean_t mlxcx_cmd_enable_hca(mlxcx_t *);
1189 extern boolean_t mlxcx_cmd_disable_hca(mlxcx_t *);
1190 
1191 extern boolean_t mlxcx_cmd_query_issi(mlxcx_t *, uint_t *);
1192 extern boolean_t mlxcx_cmd_set_issi(mlxcx_t *, uint16_t);
1193 
1194 extern boolean_t mlxcx_cmd_query_pages(mlxcx_t *, uint_t, int32_t *);
1195 extern boolean_t mlxcx_cmd_give_pages(mlxcx_t *, uint_t, int32_t,
1196     mlxcx_dev_page_t **);
1197 extern boolean_t mlxcx_cmd_return_pages(mlxcx_t *, int32_t, uint64_t *,
1198     int32_t *);
1199 
1200 extern boolean_t mlxcx_cmd_query_hca_cap(mlxcx_t *, mlxcx_hca_cap_type_t,
1201     mlxcx_hca_cap_mode_t, mlxcx_hca_cap_t *);
1202 
1203 extern boolean_t mlxcx_cmd_set_driver_version(mlxcx_t *, const char *);
1204 
1205 extern boolean_t mlxcx_cmd_init_hca(mlxcx_t *);
1206 extern boolean_t mlxcx_cmd_teardown_hca(mlxcx_t *);
1207 
1208 extern boolean_t mlxcx_cmd_alloc_uar(mlxcx_t *, mlxcx_uar_t *);
1209 extern boolean_t mlxcx_cmd_dealloc_uar(mlxcx_t *, mlxcx_uar_t *);
1210 
1211 extern boolean_t mlxcx_cmd_alloc_pd(mlxcx_t *, mlxcx_pd_t *);
1212 extern boolean_t mlxcx_cmd_dealloc_pd(mlxcx_t *, mlxcx_pd_t *);
1213 
1214 extern boolean_t mlxcx_cmd_alloc_tdom(mlxcx_t *, mlxcx_tdom_t *);
1215 extern boolean_t mlxcx_cmd_dealloc_tdom(mlxcx_t *, mlxcx_tdom_t *);
1216 
1217 extern boolean_t mlxcx_cmd_create_eq(mlxcx_t *, mlxcx_event_queue_t *);
1218 extern boolean_t mlxcx_cmd_destroy_eq(mlxcx_t *, mlxcx_event_queue_t *);
1219 extern boolean_t mlxcx_cmd_query_eq(mlxcx_t *, mlxcx_event_queue_t *,
1220     mlxcx_eventq_ctx_t *);
1221 
1222 extern boolean_t mlxcx_cmd_create_cq(mlxcx_t *, mlxcx_completion_queue_t *);
1223 extern boolean_t mlxcx_cmd_destroy_cq(mlxcx_t *, mlxcx_completion_queue_t *);
1224 extern boolean_t mlxcx_cmd_query_cq(mlxcx_t *, mlxcx_completion_queue_t *,
1225     mlxcx_completionq_ctx_t *);
1226 
1227 extern boolean_t mlxcx_cmd_create_rq(mlxcx_t *, mlxcx_work_queue_t *);
1228 extern boolean_t mlxcx_cmd_start_rq(mlxcx_t *, mlxcx_work_queue_t *);
1229 extern boolean_t mlxcx_cmd_stop_rq(mlxcx_t *, mlxcx_work_queue_t *);
1230 extern boolean_t mlxcx_cmd_destroy_rq(mlxcx_t *, mlxcx_work_queue_t *);
1231 extern boolean_t mlxcx_cmd_query_rq(mlxcx_t *, mlxcx_work_queue_t *,
1232     mlxcx_rq_ctx_t *);
1233 
1234 extern boolean_t mlxcx_cmd_create_tir(mlxcx_t *, mlxcx_tir_t *);
1235 extern boolean_t mlxcx_cmd_destroy_tir(mlxcx_t *, mlxcx_tir_t *);
1236 
1237 extern boolean_t mlxcx_cmd_create_sq(mlxcx_t *, mlxcx_work_queue_t *);
1238 extern boolean_t mlxcx_cmd_start_sq(mlxcx_t *, mlxcx_work_queue_t *);
1239 extern boolean_t mlxcx_cmd_stop_sq(mlxcx_t *, mlxcx_work_queue_t *);
1240 extern boolean_t mlxcx_cmd_destroy_sq(mlxcx_t *, mlxcx_work_queue_t *);
1241 extern boolean_t mlxcx_cmd_query_sq(mlxcx_t *, mlxcx_work_queue_t *,
1242     mlxcx_sq_ctx_t *);
1243 
1244 extern boolean_t mlxcx_cmd_create_tis(mlxcx_t *, mlxcx_tis_t *);
1245 extern boolean_t mlxcx_cmd_destroy_tis(mlxcx_t *, mlxcx_tis_t *);
1246 
1247 extern boolean_t mlxcx_cmd_query_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *);
1248 extern boolean_t mlxcx_cmd_query_special_ctxs(mlxcx_t *);
1249 
1250 extern boolean_t mlxcx_cmd_modify_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *,
1251     mlxcx_modify_nic_vport_ctx_fields_t);
1252 
1253 extern boolean_t mlxcx_cmd_create_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
1254 extern boolean_t mlxcx_cmd_destroy_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
1255 extern boolean_t mlxcx_cmd_set_flow_table_root(mlxcx_t *, mlxcx_flow_table_t *);
1256 
1257 extern boolean_t mlxcx_cmd_create_flow_group(mlxcx_t *, mlxcx_flow_group_t *);
1258 extern boolean_t mlxcx_cmd_set_flow_table_entry(mlxcx_t *,
1259     mlxcx_flow_entry_t *);
1260 extern boolean_t mlxcx_cmd_delete_flow_table_entry(mlxcx_t *,
1261     mlxcx_flow_entry_t *);
1262 extern boolean_t mlxcx_cmd_destroy_flow_group(mlxcx_t *, mlxcx_flow_group_t *);
1263 
1264 extern boolean_t mlxcx_cmd_access_register(mlxcx_t *, mlxcx_cmd_reg_opmod_t,
1265     mlxcx_register_id_t, mlxcx_register_data_t *);
1266 extern boolean_t mlxcx_cmd_query_port_mtu(mlxcx_t *, mlxcx_port_t *);
1267 extern boolean_t mlxcx_cmd_query_port_status(mlxcx_t *, mlxcx_port_t *);
1268 extern boolean_t mlxcx_cmd_query_port_speed(mlxcx_t *, mlxcx_port_t *);
1269 
1270 extern boolean_t mlxcx_cmd_set_port_mtu(mlxcx_t *, mlxcx_port_t *);
1271 
1272 extern boolean_t mlxcx_cmd_create_rqt(mlxcx_t *, mlxcx_rqtable_t *);
1273 extern boolean_t mlxcx_cmd_destroy_rqt(mlxcx_t *, mlxcx_rqtable_t *);
1274 
1275 extern boolean_t mlxcx_cmd_set_int_mod(mlxcx_t *, uint_t, uint_t);
1276 
1277 extern boolean_t mlxcx_cmd_query_module_status(mlxcx_t *, uint_t,
1278     mlxcx_module_status_t *, mlxcx_module_error_type_t *);
1279 extern boolean_t mlxcx_cmd_set_port_led(mlxcx_t *, mlxcx_port_t *, uint16_t);
1280 
/* Comparators for avl_t trees */
1282 extern int mlxcx_cq_compare(const void *, const void *);
1283 extern int mlxcx_dmac_fe_compare(const void *, const void *);
1284 extern int mlxcx_grmac_compare(const void *, const void *);
1285 extern int mlxcx_page_compare(const void *, const void *);
1286 
1287 extern void mlxcx_update_link_state(mlxcx_t *, mlxcx_port_t *);
1288 
1289 extern void mlxcx_eth_proto_to_string(mlxcx_eth_proto_t, char *, size_t);
1290 extern const char *mlxcx_port_status_string(mlxcx_port_status_t);
1291 
1292 extern const char *mlxcx_event_name(mlxcx_event_t);
1293 
1294 #ifdef __cplusplus
1295 }
1296 #endif
1297 
1298 #endif /* _MLXCX_H */
1299