xref: /illumos-gate/usr/src/uts/common/io/mlxcx/mlxcx_ring.c (revision 82b4190e0f86654c179e1dad46c51c6f999464ec)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2020, The University of Queensland
14  * Copyright (c) 2018, Joyent, Inc.
15  * Copyright 2020 RackTop Systems, Inc.
16  */
17 
18 /*
19  * Mellanox Connect-X 4/5/6 driver.
20  */
21 
22 #include <sys/modctl.h>
23 #include <sys/conf.h>
24 #include <sys/devops.h>
25 #include <sys/sysmacros.h>
26 #include <sys/atomic.h>
27 #include <sys/cpuvar.h>
28 #include <sys/sdt.h>
29 
30 #include <sys/pattr.h>
31 #include <sys/dlpi.h>
32 
33 #include <sys/mac_provider.h>
34 
35 #include <sys/random.h>
36 
37 #include <mlxcx.h>
38 
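/*
 * Allocate the DMA memory backing a work queue: the ring of send or receive
 * queue entries (sized from the mldp_sq_size_shift or mldp_rq_size_shift
 * driver properties) and the queue's doorbell record. On success the queue
 * is marked MLXCX_WQ_ALLOC.
 */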
39 boolean_t
40 mlxcx_wq_alloc_dma(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
41 {
42 	ddi_device_acc_attr_t acc;
43 	ddi_dma_attr_t attr;
44 	boolean_t ret;
45 	size_t sz;
46 
47 	VERIFY0(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
48 
49 	/* Receive and send queue entries might be different sizes. */
50 	switch (mlwq->mlwq_type) {
51 	case MLXCX_WQ_TYPE_SENDQ:
52 		mlwq->mlwq_entshift = mlxp->mlx_props.mldp_sq_size_shift;
53 		mlwq->mlwq_nents = (1 << mlwq->mlwq_entshift);
54 		sz = mlwq->mlwq_nents * sizeof (mlxcx_sendq_ent_t);
55 		break;
56 	case MLXCX_WQ_TYPE_RECVQ:
57 		mlwq->mlwq_entshift = mlxp->mlx_props.mldp_rq_size_shift;
58 		mlwq->mlwq_nents = (1 << mlwq->mlwq_entshift);
59 		sz = mlwq->mlwq_nents * sizeof (mlxcx_recvq_ent_t);
60 		break;
61 	default:
62 		VERIFY(0);
63 		return (B_FALSE);
64 	}
65 	ASSERT3U(sz & (MLXCX_HW_PAGE_SIZE - 1), ==, 0);
66 
67 	mlxcx_dma_acc_attr(mlxp, &acc);
68 	mlxcx_dma_queue_attr(mlxp, &attr);
69 
70 	ret = mlxcx_dma_alloc(mlxp, &mlwq->mlwq_dma, &attr, &acc,
71 	    B_TRUE, sz, B_TRUE);
72 	if (!ret) {
73 		mlxcx_warn(mlxp, "failed to allocate WQ memory");
74 		return (B_FALSE);
75 	}
76 
77 	/*
78 	 * Just set the first pointer in the union. Yes, this is a strict
79 	 * aliasing violation. No, I don't care.
80 	 */
81 	mlwq->mlwq_send_ent = (mlxcx_sendq_ent_t *)mlwq->mlwq_dma.mxdb_va;
82 
83 	mlxcx_dma_acc_attr(mlxp, &acc);
84 	mlxcx_dma_qdbell_attr(mlxp, &attr);
85 	sz = sizeof (mlxcx_workq_doorbell_t);
86 	ret = mlxcx_dma_alloc(mlxp, &mlwq->mlwq_doorbell_dma, &attr, &acc,
87 	    B_TRUE, sz, B_TRUE);
88 	if (!ret) {
89 		mlxcx_warn(mlxp, "failed to allocate WQ doorbell memory");
90 		mlxcx_dma_free(&mlwq->mlwq_dma);
91 		mlwq->mlwq_send_ent = NULL;
92 		return (B_FALSE);
93 	}
94 
95 	mlwq->mlwq_doorbell =
96 	    (mlxcx_workq_doorbell_t *)mlwq->mlwq_doorbell_dma.mxdb_va;
97 
98 	mlwq->mlwq_state |= MLXCX_WQ_ALLOC;
99 
100 	return (B_TRUE);
101 }
102 
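/*
 * Free the entry ring and doorbell DMA memory allocated by
 * mlxcx_wq_alloc_dma(). The queue must either never have been created in
 * hardware or have already been destroyed.
 */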
103 void
104 mlxcx_wq_rele_dma(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
105 {
106 	VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
107 	if (mlwq->mlwq_state & MLXCX_WQ_CREATED)
108 		VERIFY(mlwq->mlwq_state & MLXCX_WQ_DESTROYED);
109 
110 	mlxcx_dma_free(&mlwq->mlwq_dma);
111 	mlwq->mlwq_send_ent = NULL;
112 	mlxcx_dma_free(&mlwq->mlwq_doorbell_dma);
113 	mlwq->mlwq_doorbell = NULL;
114 
115 	mlwq->mlwq_state &= ~MLXCX_WQ_ALLOC;
116 }
117 
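/*
 * Allocate the DMA memory backing a completion queue: the CQE ring and the
 * CQ doorbell. Each CQE is pre-initialised with the INVALID opcode and the
 * initial owner value before the queue is handed to hardware.
 */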
118 static boolean_t
119 mlxcx_cq_alloc_dma(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq,
120     uint_t ent_shift)
121 {
122 	ddi_device_acc_attr_t acc;
123 	ddi_dma_attr_t attr;
124 	boolean_t ret;
125 	size_t sz, i;
126 
127 	VERIFY0(mlcq->mlcq_state & MLXCX_CQ_ALLOC);
128 
129 	mlcq->mlcq_entshift = ent_shift;
130 	mlcq->mlcq_nents = (1 << mlcq->mlcq_entshift);
131 	sz = mlcq->mlcq_nents * sizeof (mlxcx_completionq_ent_t);
132 	ASSERT3U(sz & (MLXCX_HW_PAGE_SIZE - 1), ==, 0);
133 
134 	mlxcx_dma_acc_attr(mlxp, &acc);
135 	mlxcx_dma_queue_attr(mlxp, &attr);
136 
137 	ret = mlxcx_dma_alloc(mlxp, &mlcq->mlcq_dma, &attr, &acc,
138 	    B_TRUE, sz, B_TRUE);
139 	if (!ret) {
140 		mlxcx_warn(mlxp, "failed to allocate CQ memory");
141 		return (B_FALSE);
142 	}
143 
144 	mlcq->mlcq_ent = (mlxcx_completionq_ent_t *)mlcq->mlcq_dma.mxdb_va;
145 
146 	for (i = 0; i < mlcq->mlcq_nents; ++i) {
147 		mlcq->mlcq_ent[i].mlcqe_opcode = MLXCX_CQE_OP_INVALID;
148 		mlcq->mlcq_ent[i].mlcqe_owner = MLXCX_CQE_OWNER_INIT;
149 	}
150 
151 	mlxcx_dma_acc_attr(mlxp, &acc);
152 	mlxcx_dma_qdbell_attr(mlxp, &attr);
153 	sz = sizeof (mlxcx_completionq_doorbell_t);
154 	ret = mlxcx_dma_alloc(mlxp, &mlcq->mlcq_doorbell_dma, &attr, &acc,
155 	    B_TRUE, sz, B_TRUE);
156 	if (!ret) {
157 		mlxcx_warn(mlxp, "failed to allocate CQ doorbell memory");
158 		mlxcx_dma_free(&mlcq->mlcq_dma);
159 		mlcq->mlcq_ent = NULL;
160 		return (B_FALSE);
161 	}
162 
163 	mlcq->mlcq_doorbell =
164 	    (mlxcx_completionq_doorbell_t *)mlcq->mlcq_doorbell_dma.mxdb_va;
165 
166 	mlcq->mlcq_state |= MLXCX_CQ_ALLOC;
167 
168 	return (B_TRUE);
169 }
170 
171 static void
172 mlxcx_cq_rele_dma(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq)
173 {
174 	VERIFY(mlcq->mlcq_state & MLXCX_CQ_ALLOC);
175 	if (mlcq->mlcq_state & MLXCX_CQ_CREATED)
176 		VERIFY(mlcq->mlcq_state & MLXCX_CQ_DESTROYED);
177 
178 	mlxcx_dma_free(&mlcq->mlcq_dma);
179 	mlcq->mlcq_ent = NULL;
180 	mlxcx_dma_free(&mlcq->mlcq_doorbell_dma);
181 	mlcq->mlcq_doorbell = NULL;
182 
183 	mlcq->mlcq_state &= ~MLXCX_CQ_ALLOC;
184 }
185 
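/*
 * Tear down a work queue: stop and destroy it in hardware (if it was
 * created), release its DMA memory, and detach it from its completion
 * queue. The buffer shards themselves are freed later by
 * mlxcx_teardown_bufs().
 */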
186 void
187 mlxcx_wq_teardown(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
188 {
189 	mlxcx_completion_queue_t *mlcq;
190 
191 	/*
192 	 * If something is holding the lock on a long operation like a
193 	 * refill, setting this flag asks them to exit early if possible.
194 	 */
195 	atomic_or_uint(&mlwq->mlwq_state, MLXCX_WQ_TEARDOWN);
196 
197 	mutex_enter(&mlwq->mlwq_mtx);
198 
199 	list_remove(&mlxp->mlx_wqs, mlwq);
200 
201 	if ((mlwq->mlwq_state & MLXCX_WQ_CREATED) &&
202 	    !(mlwq->mlwq_state & MLXCX_WQ_DESTROYED)) {
203 		if (mlwq->mlwq_type == MLXCX_WQ_TYPE_RECVQ &&
204 		    mlwq->mlwq_state & MLXCX_WQ_STARTED &&
205 		    !mlxcx_cmd_stop_rq(mlxp, mlwq)) {
206 			mlxcx_warn(mlxp, "failed to stop "
207 			    "recv queue num %x", mlwq->mlwq_num);
208 		}
209 		if (mlwq->mlwq_type == MLXCX_WQ_TYPE_SENDQ &&
210 		    mlwq->mlwq_state & MLXCX_WQ_STARTED &&
211 		    !mlxcx_cmd_stop_sq(mlxp, mlwq)) {
212 			mlxcx_warn(mlxp, "failed to stop "
213 			    "send queue num %x", mlwq->mlwq_num);
214 		}
215 		if (mlwq->mlwq_type == MLXCX_WQ_TYPE_RECVQ &&
216 		    !mlxcx_cmd_destroy_rq(mlxp, mlwq)) {
217 			mlxcx_warn(mlxp, "failed to destroy "
218 			    "recv queue num %x", mlwq->mlwq_num);
219 		}
220 		if (mlwq->mlwq_type == MLXCX_WQ_TYPE_SENDQ &&
221 		    !mlxcx_cmd_destroy_sq(mlxp, mlwq)) {
222 			mlxcx_warn(mlxp, "failed to destroy "
223 			    "send queue num %x", mlwq->mlwq_num);
224 		}
225 	}
226 	if (mlwq->mlwq_state & MLXCX_WQ_ALLOC) {
227 		mlxcx_wq_rele_dma(mlxp, mlwq);
228 	}
229 	mlcq = mlwq->mlwq_cq;
230 
231 	/* These will be released by mlxcx_teardown_bufs() */
232 	mlwq->mlwq_bufs = NULL;
233 	mlwq->mlwq_foreign_bufs = NULL;
234 
235 	mutex_exit(&mlwq->mlwq_mtx);
236 
237 	mutex_enter(&mlcq->mlcq_mtx);
238 	mutex_enter(&mlwq->mlwq_mtx);
239 	ASSERT3P(mlcq->mlcq_wq, ==, mlwq);
240 	mlcq->mlcq_wq = NULL;
241 	mutex_exit(&mlwq->mlwq_mtx);
242 	mutex_exit(&mlcq->mlcq_mtx);
243 
244 	mutex_destroy(&mlwq->mlwq_mtx);
245 }
246 
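/*
 * Tear down a completion queue: destroy it in hardware, release its DMA
 * memory, return any buffers still associated with it, detach it from its
 * event queue's AVL tree (taking the EQ lock first, to match the interrupt
 * handler's lock ordering) and finally free the CQ structure itself.
 */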
247 void
248 mlxcx_cq_teardown(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq)
249 {
250 	mlxcx_event_queue_t *mleq;
251 	mlxcx_buffer_t *b;
252 
253 	/*
254 	 * If something is holding the lock on a long operation like polling
255 	 * which we're going to abort anyway, this flag asks them to exit
256 	 * early if possible.
257 	 */
258 	atomic_or_uint(&mlcq->mlcq_state, MLXCX_CQ_TEARDOWN);
259 
260 	mutex_enter(&mlcq->mlcq_mtx);
261 
262 	list_remove(&mlxp->mlx_cqs, mlcq);
263 
264 	if ((mlcq->mlcq_state & MLXCX_CQ_CREATED) &&
265 	    !(mlcq->mlcq_state & MLXCX_CQ_DESTROYED)) {
266 		if (!mlxcx_cmd_destroy_cq(mlxp, mlcq)) {
267 			mlxcx_warn(mlxp, "failed to destroy "
268 			    "completion queue num %u",
269 			    mlcq->mlcq_num);
270 		}
271 	}
272 	if (mlcq->mlcq_state & MLXCX_CQ_ALLOC) {
273 		mlxcx_cq_rele_dma(mlxp, mlcq);
274 	}
275 	/*
276 	 * If we're on an EQ AVL tree, then we need to grab
277 	 * the EQ's mutex to take it off. The ISR always takes
278 	 * EQ mutex before CQ mutex, so we have to let go of
279 	 * the CQ mutex and then come back again.
280 	 *
281 	 * The ISR will bail out if it tries to touch this CQ now,
282 	 * since we added the CQ_DESTROYED flag above.
283 	 */
284 	if (mlcq->mlcq_state & MLXCX_CQ_EQAVL) {
285 		mleq = mlcq->mlcq_eq;
286 	} else {
287 		mleq = NULL;
288 	}
289 
290 	/* Return any outstanding buffers to the free pool. */
291 	while ((b = list_remove_head(&mlcq->mlcq_buffers)) != NULL) {
292 		mlxcx_buf_return_chain(mlxp, b, B_FALSE);
293 	}
294 	mutex_enter(&mlcq->mlcq_bufbmtx);
295 	while ((b = list_remove_head(&mlcq->mlcq_buffers_b)) != NULL) {
296 		mlxcx_buf_return_chain(mlxp, b, B_FALSE);
297 	}
298 	mutex_exit(&mlcq->mlcq_bufbmtx);
299 
300 	/*
301 	 * Since the interrupt handlers take the EQ lock before the CQ one,
302 	 * we must do the same here. That means letting go of the lock
303 	 * for a brief window here (we'll double-check the state when we
304 	 * get back in).
305 	 */
306 	mutex_exit(&mlcq->mlcq_mtx);
307 
308 	if (mleq != NULL) {
309 		mutex_enter(&mleq->mleq_mtx);
310 		mutex_enter(&mlcq->mlcq_mtx);
311 		/*
312 		 * Double-check the state, since we let go of the
313 		 * mutex briefly.
314 		 */
315 		if (mlcq->mlcq_state & MLXCX_CQ_EQAVL) {
316 			avl_remove(&mleq->mleq_cqs, mlcq);
317 			mlcq->mlcq_state &= ~MLXCX_CQ_EQAVL;
318 		}
319 		mutex_exit(&mlcq->mlcq_mtx);
320 		mutex_exit(&mleq->mleq_mtx);
321 	}
322 
323 	mutex_enter(&mlcq->mlcq_mtx);
324 	ASSERT0(mlcq->mlcq_state & ~(MLXCX_CQ_CREATED | MLXCX_CQ_DESTROYED |
325 	    MLXCX_CQ_TEARDOWN | MLXCX_CQ_ARMED));
326 	mutex_exit(&mlcq->mlcq_mtx);
327 
328 	mutex_destroy(&mlcq->mlcq_mtx);
329 	mutex_destroy(&mlcq->mlcq_bufbmtx);
330 	list_destroy(&mlcq->mlcq_buffers);
331 	list_destroy(&mlcq->mlcq_buffers_b);
332 	kmem_free(mlcq, sizeof (mlxcx_completion_queue_t));
333 }
334 
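/*
 * Allocate and create a completion queue, attach it to the given event
 * queue's AVL tree and arm it so that it will raise events.
 */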
335 static boolean_t
336 mlxcx_cq_setup(mlxcx_t *mlxp, mlxcx_event_queue_t *eq,
337     mlxcx_completion_queue_t **cqp, uint_t ent_shift)
338 {
339 	mlxcx_completion_queue_t *cq;
340 
341 	cq = kmem_zalloc(sizeof (mlxcx_completion_queue_t), KM_SLEEP);
342 	mutex_init(&cq->mlcq_mtx, NULL, MUTEX_DRIVER,
343 	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
344 	mutex_init(&cq->mlcq_bufbmtx, NULL, MUTEX_DRIVER,
345 	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
346 	list_create(&cq->mlcq_buffers, sizeof (mlxcx_buffer_t),
347 	    offsetof(mlxcx_buffer_t, mlb_cq_entry));
348 	list_create(&cq->mlcq_buffers_b, sizeof (mlxcx_buffer_t),
349 	    offsetof(mlxcx_buffer_t, mlb_cq_entry));
350 
351 	cq->mlcq_mlx = mlxp;
352 	list_insert_tail(&mlxp->mlx_cqs, cq);
353 
354 	mutex_enter(&cq->mlcq_mtx);
355 
356 	if (!mlxcx_cq_alloc_dma(mlxp, cq, ent_shift)) {
357 		mutex_exit(&cq->mlcq_mtx);
358 		return (B_FALSE);
359 	}
360 
361 	cq->mlcq_bufhwm = cq->mlcq_nents - MLXCX_CQ_HWM_GAP;
362 	cq->mlcq_buflwm = cq->mlcq_nents - MLXCX_CQ_LWM_GAP;
363 
364 	cq->mlcq_uar = &mlxp->mlx_uar;
365 	cq->mlcq_eq = eq;
366 
367 	cq->mlcq_cqemod_period_usec = mlxp->mlx_props.mldp_cqemod_period_usec;
368 	cq->mlcq_cqemod_count = mlxp->mlx_props.mldp_cqemod_count;
369 
370 	if (!mlxcx_cmd_create_cq(mlxp, cq)) {
371 		mutex_exit(&cq->mlcq_mtx);
372 		return (B_FALSE);
373 	}
374 
375 	mutex_exit(&cq->mlcq_mtx);
376 
377 	mutex_enter(&eq->mleq_mtx);
378 	mutex_enter(&cq->mlcq_mtx);
379 	ASSERT0(cq->mlcq_state & MLXCX_CQ_EQAVL);
380 	avl_add(&eq->mleq_cqs, cq);
381 	cq->mlcq_state |= MLXCX_CQ_EQAVL;
382 	mlxcx_arm_cq(mlxp, cq);
383 	mutex_exit(&cq->mlcq_mtx);
384 	mutex_exit(&eq->mleq_mtx);
385 
386 	*cqp = cq;
387 	return (B_TRUE);
388 }
389 
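/*
 * Set up a receive work queue on the given completion queue: allocate its
 * DMA memory, create the RQ in hardware and cross-link it with the CQ.
 */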
390 static boolean_t
391 mlxcx_rq_setup(mlxcx_t *mlxp, mlxcx_completion_queue_t *cq,
392     mlxcx_work_queue_t *wq)
393 {
394 	mutex_init(&wq->mlwq_mtx, NULL, MUTEX_DRIVER,
395 	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
396 
397 	list_insert_tail(&mlxp->mlx_wqs, wq);
398 
399 	mutex_enter(&wq->mlwq_mtx);
400 
401 	wq->mlwq_mlx = mlxp;
402 	wq->mlwq_type = MLXCX_WQ_TYPE_RECVQ;
403 	wq->mlwq_cq = cq;
404 	wq->mlwq_pd = &mlxp->mlx_pd;
405 	wq->mlwq_uar = &mlxp->mlx_uar;
406 
407 	wq->mlwq_bufs = mlxcx_mlbs_create(mlxp);
408 
409 	if (!mlxcx_wq_alloc_dma(mlxp, wq)) {
410 		mutex_exit(&wq->mlwq_mtx);
411 		return (B_FALSE);
412 	}
413 
414 	if (!mlxcx_cmd_create_rq(mlxp, wq)) {
415 		mutex_exit(&wq->mlwq_mtx);
416 		return (B_FALSE);
417 	}
418 
419 	wq->mlwq_bufhwm = wq->mlwq_nents - MLXCX_WQ_HWM_GAP;
420 	wq->mlwq_buflwm = wq->mlwq_nents - MLXCX_WQ_LWM_GAP;
421 
422 	mutex_exit(&wq->mlwq_mtx);
423 
424 	mutex_enter(&cq->mlcq_mtx);
425 	mutex_enter(&wq->mlwq_mtx);
426 	ASSERT3P(cq->mlcq_wq, ==, NULL);
427 	cq->mlcq_wq = wq;
428 	mutex_exit(&wq->mlwq_mtx);
429 	mutex_exit(&cq->mlcq_mtx);
430 
431 	return (B_TRUE);
432 }
433 
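/*
 * Set up a send work queue on the given completion queue and TIS: allocate
 * its buffer shards (including a shard of "foreign" buffers used for
 * binding packet data handed down by MAC), allocate its DMA memory, create
 * the SQ in hardware and cross-link it with the CQ.
 */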
434 static boolean_t
435 mlxcx_sq_setup(mlxcx_t *mlxp, mlxcx_port_t *port, mlxcx_completion_queue_t *cq,
436     mlxcx_tis_t *tis, mlxcx_work_queue_t *wq)
437 {
438 	mutex_init(&wq->mlwq_mtx, NULL, MUTEX_DRIVER,
439 	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
440 
441 	list_insert_tail(&mlxp->mlx_wqs, wq);
442 
443 	mutex_enter(&wq->mlwq_mtx);
444 
445 	wq->mlwq_mlx = mlxp;
446 	wq->mlwq_type = MLXCX_WQ_TYPE_SENDQ;
447 	wq->mlwq_cq = cq;
448 	wq->mlwq_pd = &mlxp->mlx_pd;
449 	wq->mlwq_uar = &mlxp->mlx_uar;
450 	wq->mlwq_tis = tis;
451 
452 	wq->mlwq_bufs = mlxcx_mlbs_create(mlxp);
453 	wq->mlwq_foreign_bufs = mlxcx_mlbs_create(mlxp);
454 
455 	VERIFY3U(port->mlp_wqe_min_inline, <=, MLXCX_ETH_INLINE_L2);
456 	wq->mlwq_inline_mode = MLXCX_ETH_INLINE_L2;
457 
458 	if (!mlxcx_wq_alloc_dma(mlxp, wq)) {
459 		mutex_exit(&wq->mlwq_mtx);
460 		return (B_FALSE);
461 	}
462 
463 	if (!mlxcx_cmd_create_sq(mlxp, wq)) {
464 		mutex_exit(&wq->mlwq_mtx);
465 		return (B_FALSE);
466 	}
467 
468 	wq->mlwq_bufhwm = wq->mlwq_nents - MLXCX_WQ_HWM_GAP;
469 	wq->mlwq_buflwm = wq->mlwq_nents - MLXCX_WQ_LWM_GAP;
470 
471 	mutex_exit(&wq->mlwq_mtx);
472 
473 	mutex_enter(&cq->mlcq_mtx);
474 	mutex_enter(&wq->mlwq_mtx);
475 	ASSERT3P(cq->mlcq_wq, ==, NULL);
476 	cq->mlcq_wq = wq;
477 	mutex_exit(&wq->mlwq_mtx);
478 	mutex_exit(&cq->mlcq_mtx);
479 
480 	return (B_TRUE);
481 }
482 
483 /*
484  * Before we tear down the queues associated with the rx group,
485  * flag each cq as being torn down and wake up any threads waiting on
486  * free buffers.
486  */
487 static void
488 mlxcx_quiesce_rx_cqs(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
489 {
490 	mlxcx_work_queue_t *wq;
491 	mlxcx_completion_queue_t *cq;
492 	mlxcx_buf_shard_t *s;
493 	uint_t i;
494 
495 	mutex_enter(&g->mlg_mtx);
496 
497 	for (i = 0; i < g->mlg_nwqs; ++i) {
498 		wq = &g->mlg_wqs[i];
499 		cq = wq->mlwq_cq;
500 		if (cq != NULL) {
501 			s = wq->mlwq_bufs;
502 			mutex_enter(&s->mlbs_mtx);
503 			atomic_or_uint(&cq->mlcq_state, MLXCX_CQ_TEARDOWN);
504 			cv_broadcast(&s->mlbs_free_nonempty);
505 			mutex_exit(&s->mlbs_mtx);
506 		}
507 	}
508 
509 	mutex_exit(&g->mlg_mtx);
510 }
511 
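/*
 * Tear down an rx group in the reverse order of setup: flow table entries
 * and tables, the refill taskq and running RQs, the TIRs, the RQ table, and
 * finally the work queues and completion queues themselves.
 */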
512 void
513 mlxcx_teardown_rx_group(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
514 {
515 	mlxcx_work_queue_t *wq;
516 	mlxcx_completion_queue_t *cq;
517 	mlxcx_flow_entry_t *fe;
518 	mlxcx_flow_group_t *fg;
519 	mlxcx_flow_table_t *ft;
520 	uint_t i;
521 
522 	mutex_enter(&g->mlg_port->mlp_mtx);
523 	mutex_enter(&g->mlg_mtx);
524 
525 	if (g->mlg_state & MLXCX_GROUP_FLOWS) {
526 		mlxcx_remove_all_umcast_entries(mlxp, g->mlg_port, g);
527 
528 		if (g->mlg_rx_vlan_ft != NULL)
529 			mlxcx_remove_all_vlan_entries(mlxp, g);
530 
531 		if (g == &mlxp->mlx_rx_groups[0]) {
532 			ft = g->mlg_port->mlp_rx_flow;
533 			mutex_enter(&ft->mlft_mtx);
534 
535 			fg = g->mlg_port->mlp_bcast;
536 			fe = list_head(&fg->mlfg_entries);
537 			if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
538 				(void) mlxcx_cmd_delete_flow_table_entry(
539 				    mlxp, fe);
540 			}
541 
542 			fg = g->mlg_port->mlp_promisc;
543 			fe = list_head(&fg->mlfg_entries);
544 			if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
545 				(void) mlxcx_cmd_delete_flow_table_entry(
546 				    mlxp, fe);
547 			}
548 
549 			mutex_exit(&ft->mlft_mtx);
550 		}
551 
552 		if (g->mlg_rx_vlan_ft != NULL) {
553 			mutex_enter(&g->mlg_rx_vlan_ft->mlft_mtx);
554 			ASSERT(list_is_empty(&g->mlg_rx_vlans));
555 			fg = g->mlg_rx_vlan_def_fg;
556 			fe = list_head(&fg->mlfg_entries);
557 			if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
558 				(void) mlxcx_cmd_delete_flow_table_entry(
559 				    mlxp, fe);
560 			}
561 			fg = g->mlg_rx_vlan_promisc_fg;
562 			fe = list_head(&fg->mlfg_entries);
563 			if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
564 				(void) mlxcx_cmd_delete_flow_table_entry(
565 				    mlxp, fe);
566 			}
567 			mlxcx_teardown_flow_table(mlxp, g->mlg_rx_vlan_ft);
568 			list_destroy(&g->mlg_rx_vlans);
569 
570 			g->mlg_rx_vlan_ft = NULL;
571 		}
572 
573 		mutex_enter(&g->mlg_rx_hash_ft->mlft_mtx);
574 		mlxcx_teardown_flow_table(mlxp, g->mlg_rx_hash_ft);
575 		g->mlg_rx_hash_ft = NULL;
576 
577 		avl_destroy(&g->mlg_rx_macs);
578 		g->mlg_state &= ~MLXCX_GROUP_FLOWS;
579 	}
580 
581 	if (g->mlg_state & MLXCX_GROUP_RUNNING) {
582 		for (i = 0; i < g->mlg_nwqs; ++i) {
583 			wq = &g->mlg_wqs[i];
584 			mutex_enter(&wq->mlwq_mtx);
585 			if (wq->mlwq_state & MLXCX_WQ_STARTED &&
586 			    !mlxcx_cmd_stop_rq(mlxp, wq)) {
587 				mlxcx_warn(mlxp, "failed to stop rq %x",
588 				    wq->mlwq_num);
589 			}
590 			mutex_exit(&wq->mlwq_mtx);
591 		}
592 		taskq_destroy(g->mlg_refill_tq);
593 		g->mlg_state &= ~MLXCX_GROUP_RUNNING;
594 	}
595 
596 	if (g->mlg_state & MLXCX_GROUP_TIRTIS) {
597 		for (i = 0; i < MLXCX_TIRS_PER_GROUP; ++i) {
598 			mlxcx_tir_t *tir = &g->mlg_tir[i];
599 			if (tir->mltir_state & MLXCX_TIR_CREATED &&
600 			    !(tir->mltir_state & MLXCX_TIR_DESTROYED)) {
601 				if (!mlxcx_cmd_destroy_tir(mlxp, tir)) {
602 					mlxcx_warn(mlxp,
603 					    "failed to destroy tir %u "
604 					    "for rx ring", tir->mltir_num);
605 				}
606 			}
607 		}
608 		g->mlg_state &= ~MLXCX_GROUP_TIRTIS;
609 	}
610 
611 	if (g->mlg_state & MLXCX_GROUP_RQT) {
612 		if (g->mlg_rqt->mlrqt_state & MLXCX_RQT_CREATED &&
613 		    !(g->mlg_rqt->mlrqt_state & MLXCX_RQT_DESTROYED)) {
614 			if (!mlxcx_cmd_destroy_rqt(mlxp, g->mlg_rqt)) {
615 				mlxcx_warn(mlxp, "failed to destroy rqt %u "
616 				    "for rx ring", g->mlg_rqt->mlrqt_num);
617 			}
618 			kmem_free(g->mlg_rqt->mlrqt_rq,
619 			    g->mlg_rqt->mlrqt_rq_size);
620 			g->mlg_rqt->mlrqt_rq = NULL;
621 			kmem_free(g->mlg_rqt, sizeof (mlxcx_rqtable_t));
622 			g->mlg_rqt = NULL;
623 		}
624 		g->mlg_state &= ~MLXCX_GROUP_RQT;
625 	}
626 
627 	for (i = 0; i < g->mlg_nwqs; ++i) {
628 		wq = &g->mlg_wqs[i];
629 		cq = wq->mlwq_cq;
630 		mlxcx_wq_teardown(mlxp, wq);
631 		if (cq != NULL)
632 			mlxcx_cq_teardown(mlxp, cq);
633 	}
634 	kmem_free(g->mlg_wqs, g->mlg_wqs_size);
635 	g->mlg_wqs = NULL;
636 	g->mlg_state &= ~MLXCX_GROUP_WQS;
637 
638 	mutex_exit(&g->mlg_mtx);
639 	mutex_exit(&g->mlg_port->mlp_mtx);
640 
641 	mutex_destroy(&g->mlg_mtx);
642 
643 	g->mlg_state &= ~MLXCX_GROUP_INIT;
644 	ASSERT3S(g->mlg_state, ==, 0);
645 }
646 
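/*
 * Tear down the tx group: stop and tear down each send queue and its
 * completion queue, then destroy the group's TIS.
 */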
647 void
648 mlxcx_teardown_tx_group(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
649 {
650 	mlxcx_work_queue_t *wq;
651 	mlxcx_completion_queue_t *cq;
652 	uint_t i;
653 
654 	mutex_enter(&g->mlg_mtx);
655 
656 	if (g->mlg_state & MLXCX_GROUP_WQS) {
657 		for (i = 0; i < g->mlg_nwqs; ++i) {
658 			wq = &g->mlg_wqs[i];
659 			mutex_enter(&wq->mlwq_mtx);
660 			cq = wq->mlwq_cq;
661 			if (wq->mlwq_state & MLXCX_WQ_STARTED &&
662 			    !mlxcx_cmd_stop_sq(mlxp, wq)) {
663 				mlxcx_warn(mlxp, "failed to stop sq %x",
664 				    wq->mlwq_num);
665 			}
666 			mutex_exit(&wq->mlwq_mtx);
667 			mlxcx_wq_teardown(mlxp, wq);
668 			if (cq != NULL)
669 				mlxcx_cq_teardown(mlxp, cq);
670 		}
671 		g->mlg_state &= ~MLXCX_GROUP_RUNNING;
672 		kmem_free(g->mlg_wqs, g->mlg_wqs_size);
673 		g->mlg_wqs = NULL;
674 		g->mlg_state &= ~MLXCX_GROUP_WQS;
675 	}
676 
677 	if ((g->mlg_state & MLXCX_GROUP_TIRTIS) &&
678 	    g->mlg_tis.mltis_state & MLXCX_TIS_CREATED &&
679 	    !(g->mlg_tis.mltis_state & MLXCX_TIS_DESTROYED)) {
680 		if (!mlxcx_cmd_destroy_tis(mlxp, &g->mlg_tis)) {
681 			mlxcx_warn(mlxp, "failed to destroy tis %u for tx ring",
682 			    g->mlg_tis.mltis_num);
683 		}
684 	}
685 	g->mlg_state &= ~MLXCX_GROUP_TIRTIS;
686 
687 	mutex_exit(&g->mlg_mtx);
688 	mutex_destroy(&g->mlg_mtx);
689 	g->mlg_state &= ~MLXCX_GROUP_INIT;
690 	ASSERT3S(g->mlg_state, ==, 0);
691 }
692 
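/*
 * Tear down all rx and tx groups. The rx cqs are quiesced first so that no
 * refill task is left blocked waiting for buffers while the rings are being
 * pulled down.
 */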
693 void
694 mlxcx_teardown_groups(mlxcx_t *mlxp)
695 {
696 	mlxcx_ring_group_t *g;
697 	uint_t i;
698 
699 	for (i = 0; i < mlxp->mlx_rx_ngroups; ++i) {
700 		g = &mlxp->mlx_rx_groups[i];
701 		if (!(g->mlg_state & MLXCX_GROUP_INIT))
702 			continue;
703 		ASSERT3S(g->mlg_type, ==, MLXCX_GROUP_RX);
704 		mlxcx_quiesce_rx_cqs(mlxp, g);
705 	}
706 
707 	for (i = 0; i < mlxp->mlx_rx_ngroups; ++i) {
708 		g = &mlxp->mlx_rx_groups[i];
709 		if (!(g->mlg_state & MLXCX_GROUP_INIT))
710 			continue;
711 		mlxcx_teardown_rx_group(mlxp, g);
712 	}
713 
714 	kmem_free(mlxp->mlx_rx_groups, mlxp->mlx_rx_groups_size);
715 	mlxp->mlx_rx_groups = NULL;
716 
717 	for (i = 0; i < mlxp->mlx_tx_ngroups; ++i) {
718 		g = &mlxp->mlx_tx_groups[i];
719 		if (!(g->mlg_state & MLXCX_GROUP_INIT))
720 			continue;
721 		ASSERT3S(g->mlg_type, ==, MLXCX_GROUP_TX);
722 		mlxcx_teardown_tx_group(mlxp, g);
723 	}
724 
725 	kmem_free(mlxp->mlx_tx_groups, mlxp->mlx_tx_groups_size);
726 	mlxp->mlx_tx_groups = NULL;
727 }
728 
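/*
 * Set up an rx group: create the work queues and their completion queues
 * (spread across the rx event queues), an RQ table over them, one TIR per
 * traffic role, and the group's RSS hash breakout and VLAN filtering flow
 * tables.
 */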
729 boolean_t
730 mlxcx_rx_group_setup(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
731 {
732 	mlxcx_event_queue_t *eq;
733 	mlxcx_completion_queue_t *cq;
734 	mlxcx_work_queue_t *rq;
735 	mlxcx_flow_table_t *ft;
736 	mlxcx_flow_group_t *fg;
737 	mlxcx_flow_entry_t *fe;
738 	uint_t ent_shift;
739 	uint_t i, j;
740 
741 	ASSERT3S(g->mlg_state, ==, 0);
742 
743 	mutex_init(&g->mlg_mtx, NULL, MUTEX_DRIVER,
744 	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
745 	mutex_enter(&g->mlg_mtx);
746 	g->mlg_mlx = mlxp;
747 	g->mlg_type = MLXCX_GROUP_RX;
748 	g->mlg_port = &mlxp->mlx_ports[0];
749 	g->mlg_state |= MLXCX_GROUP_INIT;
750 
751 	g->mlg_nwqs = mlxp->mlx_props.mldp_rx_nrings_per_small_group;
752 	i = g - &mlxp->mlx_rx_groups[0];
753 	if (i < mlxp->mlx_props.mldp_rx_ngroups_large)
754 		g->mlg_nwqs = mlxp->mlx_props.mldp_rx_nrings_per_large_group;
755 
756 	g->mlg_wqs_size = g->mlg_nwqs * sizeof (mlxcx_work_queue_t);
757 	g->mlg_wqs = kmem_zalloc(g->mlg_wqs_size, KM_SLEEP);
758 	g->mlg_state |= MLXCX_GROUP_WQS;
759 
760 	g->mlg_rqt = kmem_zalloc(sizeof (mlxcx_rqtable_t), KM_SLEEP);
761 	g->mlg_rqt->mlrqt_max = 2;
762 	while (g->mlg_rqt->mlrqt_max < g->mlg_nwqs)
763 		g->mlg_rqt->mlrqt_max <<= 1;
764 	g->mlg_rqt->mlrqt_rq_size = g->mlg_rqt->mlrqt_max *
765 	    sizeof (mlxcx_work_queue_t *);
766 	g->mlg_rqt->mlrqt_rq = kmem_zalloc(g->mlg_rqt->mlrqt_rq_size, KM_SLEEP);
767 	g->mlg_state |= MLXCX_GROUP_RQT;
768 
769 	for (i = 0; i < g->mlg_nwqs; ++i) {
770 		eq = NULL;
771 		while (eq == NULL) {
772 			eq = &mlxp->mlx_eqs[mlxp->mlx_next_eq++];
773 			if (mlxp->mlx_next_eq >= mlxp->mlx_intr_count)
774 				mlxp->mlx_next_eq = 1;
775 			if (eq->mleq_type != MLXCX_EQ_TYPE_ANY &&
776 			    eq->mleq_type != MLXCX_EQ_TYPE_RX) {
777 				/* Try the next one */
778 				eq = NULL;
779 			}
780 		}
781 
782 		/*
783 		 * A single completion is indicated for each rq entry as
784 		 * it is used. So, the number of cq entries never needs
785 		 * to be larger than the number of rq entries.
786 		 */
787 		ent_shift = MIN(mlxp->mlx_props.mldp_cq_size_shift,
788 		    mlxp->mlx_props.mldp_rq_size_shift);
789 		if (!mlxcx_cq_setup(mlxp, eq, &cq, ent_shift)) {
790 			g->mlg_nwqs = i;
791 			break;
792 		}
793 
794 		cq->mlcq_stats = &g->mlg_port->mlp_stats;
795 
796 		rq = &g->mlg_wqs[i];
797 		if (!mlxcx_rq_setup(mlxp, cq, rq)) {
798 			g->mlg_nwqs = i;
799 			break;
800 		}
801 		g->mlg_rqt->mlrqt_rq[g->mlg_rqt->mlrqt_used++] = rq;
802 		g->mlg_rqt->mlrqt_state |= MLXCX_RQT_DIRTY;
803 		rq->mlwq_group = g;
804 	}
805 	if (g->mlg_nwqs == 0) {
806 		mutex_exit(&g->mlg_mtx);
807 		return (B_FALSE);
808 	}
809 
810 	if (!mlxcx_cmd_create_rqt(mlxp, g->mlg_rqt)) {
811 		mutex_exit(&g->mlg_mtx);
812 		return (B_FALSE);
813 	}
814 
815 	for (i = 0; i < MLXCX_TIRS_PER_GROUP; ++i) {
816 		mlxcx_tir_t *tir = &g->mlg_tir[i];
817 		tir->mltir_tdom = &mlxp->mlx_tdom;
818 		switch (i) {
819 		case MLXCX_TIR_ROLE_OTHER:
820 			tir->mltir_type = MLXCX_TIR_DIRECT;
821 			tir->mltir_rq = &g->mlg_wqs[0];
822 			break;
823 		case MLXCX_TIR_ROLE_IPv4:
824 		case MLXCX_TIR_ROLE_IPv6:
825 		case MLXCX_TIR_ROLE_TCPv4:
826 		case MLXCX_TIR_ROLE_TCPv6:
827 		case MLXCX_TIR_ROLE_UDPv4:
828 		case MLXCX_TIR_ROLE_UDPv6:
829 			tir->mltir_type = MLXCX_TIR_INDIRECT;
830 			tir->mltir_rqtable = g->mlg_rqt;
831 			tir->mltir_hash_fn = MLXCX_TIR_HASH_TOEPLITZ;
832 			(void) random_get_pseudo_bytes(tir->mltir_toeplitz_key,
833 			    sizeof (tir->mltir_toeplitz_key));
834 			break;
835 		}
836 		switch (i) {
837 		case MLXCX_TIR_ROLE_OTHER:
838 			break;
839 		case MLXCX_TIR_ROLE_IPv4:
840 		case MLXCX_TIR_ROLE_TCPv4:
841 		case MLXCX_TIR_ROLE_UDPv4:
842 			tir->mltir_l3_type = MLXCX_RX_HASH_L3_IPv4;
843 			tir->mltir_hash_fields =
844 			    MLXCX_RX_HASH_SRC_IP | MLXCX_RX_HASH_DST_IP;
845 			break;
846 		case MLXCX_TIR_ROLE_IPv6:
847 		case MLXCX_TIR_ROLE_TCPv6:
848 		case MLXCX_TIR_ROLE_UDPv6:
849 			tir->mltir_l3_type = MLXCX_RX_HASH_L3_IPv6;
850 			tir->mltir_hash_fields =
851 			    MLXCX_RX_HASH_SRC_IP | MLXCX_RX_HASH_DST_IP;
852 			break;
853 		}
854 		switch (i) {
855 		case MLXCX_TIR_ROLE_OTHER:
856 		case MLXCX_TIR_ROLE_IPv4:
857 		case MLXCX_TIR_ROLE_IPv6:
858 			break;
859 		case MLXCX_TIR_ROLE_TCPv4:
860 		case MLXCX_TIR_ROLE_TCPv6:
861 			tir->mltir_l4_type = MLXCX_RX_HASH_L4_TCP;
862 			tir->mltir_hash_fields |=
863 			    MLXCX_RX_HASH_L4_SPORT | MLXCX_RX_HASH_L4_DPORT;
864 			break;
865 		case MLXCX_TIR_ROLE_UDPv4:
866 		case MLXCX_TIR_ROLE_UDPv6:
867 			tir->mltir_l4_type = MLXCX_RX_HASH_L4_UDP;
868 			tir->mltir_hash_fields |=
869 			    MLXCX_RX_HASH_L4_SPORT | MLXCX_RX_HASH_L4_DPORT;
870 			break;
871 		}
872 
873 		if (!mlxcx_cmd_create_tir(mlxp, tir)) {
874 			mutex_exit(&g->mlg_mtx);
875 			return (B_FALSE);
876 		}
877 
878 		g->mlg_state |= MLXCX_GROUP_TIRTIS;
879 	}
880 
881 	/*
882 	 * Flow table: our RX hashing breakout table for RSS
883 	 */
884 
885 	g->mlg_rx_hash_ft = (ft = kmem_zalloc(sizeof (mlxcx_flow_table_t),
886 	    KM_SLEEP));
887 	mutex_init(&ft->mlft_mtx, NULL, MUTEX_DRIVER,
888 	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
889 	avl_create(&g->mlg_rx_macs, mlxcx_grmac_compare,
890 	    sizeof (mlxcx_group_mac_t),
891 	    offsetof(mlxcx_group_mac_t, mlgm_group_entry));
892 	g->mlg_state |= MLXCX_GROUP_FLOWS;
893 
894 	mutex_enter(&ft->mlft_mtx);
895 
896 	ft->mlft_type = MLXCX_FLOW_TABLE_NIC_RX;
897 	ft->mlft_level = 2;
898 	ft->mlft_port = g->mlg_port;
899 	ft->mlft_entshift = MLXCX_RX_HASH_FT_SIZE_SHIFT;
900 	ft->mlft_nents = (1 << ft->mlft_entshift);
901 	ASSERT3U(ft->mlft_nents, >=, MLXCX_TIRS_PER_GROUP);
902 	ft->mlft_entsize = ft->mlft_nents * sizeof (mlxcx_flow_entry_t);
903 	ft->mlft_ent = kmem_zalloc(ft->mlft_entsize, KM_SLEEP);
904 	list_create(&ft->mlft_groups, sizeof (mlxcx_flow_group_t),
905 	    offsetof(mlxcx_flow_group_t, mlfg_entry));
906 
907 	for (j = 0; j < ft->mlft_nents; ++j) {
908 		ft->mlft_ent[j].mlfe_table = ft;
909 		ft->mlft_ent[j].mlfe_index = j;
910 	}
911 
912 	if (!mlxcx_cmd_create_flow_table(mlxp, ft)) {
913 		mutex_exit(&ft->mlft_mtx);
914 		mutex_exit(&g->mlg_mtx);
915 		return (B_FALSE);
916 	}
917 
918 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
919 	list_insert_tail(&ft->mlft_groups, fg);
920 	fg->mlfg_table = ft;
921 	fg->mlfg_size = 1;
922 	fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER | MLXCX_FLOW_MATCH_IP_PROTO;
923 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
924 		mutex_exit(&ft->mlft_mtx);
925 		mutex_exit(&g->mlg_mtx);
926 		return (B_FALSE);
927 	}
928 	fe = list_head(&fg->mlfg_entries);
929 	fe->mlfe_ip_version = 6;
930 	fe->mlfe_ip_proto = IPPROTO_UDP;
931 	fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
932 	fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
933 	    &g->mlg_tir[MLXCX_TIR_ROLE_UDPv6];
934 	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
935 		mutex_exit(&ft->mlft_mtx);
936 		mutex_exit(&g->mlg_mtx);
937 		return (B_FALSE);
938 	}
939 
940 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
941 	list_insert_tail(&ft->mlft_groups, fg);
942 	fg->mlfg_table = ft;
943 	fg->mlfg_size = 1;
944 	fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER | MLXCX_FLOW_MATCH_IP_PROTO;
945 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
946 		mutex_exit(&ft->mlft_mtx);
947 		mutex_exit(&g->mlg_mtx);
948 		return (B_FALSE);
949 	}
950 	fe = list_head(&fg->mlfg_entries);
951 	fe->mlfe_ip_version = 4;
952 	fe->mlfe_ip_proto = IPPROTO_UDP;
953 	fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
954 	fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
955 	    &g->mlg_tir[MLXCX_TIR_ROLE_UDPv4];
956 	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
957 		mutex_exit(&ft->mlft_mtx);
958 		mutex_exit(&g->mlg_mtx);
959 		return (B_FALSE);
960 	}
961 
962 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
963 	list_insert_tail(&ft->mlft_groups, fg);
964 	fg->mlfg_table = ft;
965 	fg->mlfg_size = 1;
966 	fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER | MLXCX_FLOW_MATCH_IP_PROTO;
967 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
968 		mutex_exit(&ft->mlft_mtx);
969 		mutex_exit(&g->mlg_mtx);
970 		return (B_FALSE);
971 	}
972 	fe = list_head(&fg->mlfg_entries);
973 	fe->mlfe_ip_version = 6;
974 	fe->mlfe_ip_proto = IPPROTO_TCP;
975 	fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
976 	fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
977 	    &g->mlg_tir[MLXCX_TIR_ROLE_TCPv6];
978 	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
979 		mutex_exit(&ft->mlft_mtx);
980 		mutex_exit(&g->mlg_mtx);
981 		return (B_FALSE);
982 	}
983 
984 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
985 	list_insert_tail(&ft->mlft_groups, fg);
986 	fg->mlfg_table = ft;
987 	fg->mlfg_size = 1;
988 	fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER | MLXCX_FLOW_MATCH_IP_PROTO;
989 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
990 		mutex_exit(&ft->mlft_mtx);
991 		mutex_exit(&g->mlg_mtx);
992 		return (B_FALSE);
993 	}
994 	fe = list_head(&fg->mlfg_entries);
995 	fe->mlfe_ip_version = 4;
996 	fe->mlfe_ip_proto = IPPROTO_TCP;
997 	fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
998 	fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
999 	    &g->mlg_tir[MLXCX_TIR_ROLE_TCPv4];
1000 	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
1001 		mutex_exit(&ft->mlft_mtx);
1002 		mutex_exit(&g->mlg_mtx);
1003 		return (B_FALSE);
1004 	}
1005 
1006 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
1007 	list_insert_tail(&ft->mlft_groups, fg);
1008 	fg->mlfg_table = ft;
1009 	fg->mlfg_size = 1;
1010 	fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER;
1011 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
1012 		mutex_exit(&ft->mlft_mtx);
1013 		mutex_exit(&g->mlg_mtx);
1014 		return (B_FALSE);
1015 	}
1016 	fe = list_head(&fg->mlfg_entries);
1017 	fe->mlfe_ip_version = 6;
1018 	fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
1019 	fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
1020 	    &g->mlg_tir[MLXCX_TIR_ROLE_IPv6];
1021 	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
1022 		mutex_exit(&ft->mlft_mtx);
1023 		mutex_exit(&g->mlg_mtx);
1024 		return (B_FALSE);
1025 	}
1026 
1027 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
1028 	list_insert_tail(&ft->mlft_groups, fg);
1029 	fg->mlfg_table = ft;
1030 	fg->mlfg_size = 1;
1031 	fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER;
1032 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
1033 		mutex_exit(&ft->mlft_mtx);
1034 		mutex_exit(&g->mlg_mtx);
1035 		return (B_FALSE);
1036 	}
1037 	fe = list_head(&fg->mlfg_entries);
1038 	fe->mlfe_ip_version = 4;
1039 	fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
1040 	fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
1041 	    &g->mlg_tir[MLXCX_TIR_ROLE_IPv4];
1042 	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
1043 		mutex_exit(&ft->mlft_mtx);
1044 		mutex_exit(&g->mlg_mtx);
1045 		return (B_FALSE);
1046 	}
1047 
1048 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
1049 	list_insert_tail(&ft->mlft_groups, fg);
1050 	fg->mlfg_table = ft;
1051 	fg->mlfg_size = 1;
1052 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
1053 		mutex_exit(&ft->mlft_mtx);
1054 		mutex_exit(&g->mlg_mtx);
1055 		return (B_FALSE);
1056 	}
1057 	fe = list_head(&fg->mlfg_entries);
1058 	fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
1059 	fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
1060 	    &g->mlg_tir[MLXCX_TIR_ROLE_OTHER];
1061 	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
1062 		mutex_exit(&ft->mlft_mtx);
1063 		mutex_exit(&g->mlg_mtx);
1064 		return (B_FALSE);
1065 	}
1066 
1067 	mutex_exit(&ft->mlft_mtx);
1068 
1069 	/*
1070 	 * Flow table: the VLAN breakout table for doing VLAN filtering after
1071 	 * we've matched a MAC address.
1072 	 */
1073 
1074 	g->mlg_rx_vlan_ft = (ft = kmem_zalloc(sizeof (mlxcx_flow_table_t),
1075 	    KM_SLEEP));
1076 	mutex_init(&ft->mlft_mtx, NULL, MUTEX_DRIVER,
1077 	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
1078 	list_create(&g->mlg_rx_vlans, sizeof (mlxcx_group_vlan_t),
1079 	    offsetof(mlxcx_group_vlan_t, mlgv_entry));
1080 
1081 	mutex_enter(&ft->mlft_mtx);
1082 
1083 	ft->mlft_type = MLXCX_FLOW_TABLE_NIC_RX;
1084 	ft->mlft_level = 1;
1085 	ft->mlft_port = g->mlg_port;
1086 	ft->mlft_entshift = mlxp->mlx_props.mldp_ftbl_vlan_size_shift;
1087 	ft->mlft_nents = (1 << ft->mlft_entshift);
1088 	ft->mlft_entsize = ft->mlft_nents * sizeof (mlxcx_flow_entry_t);
1089 	ft->mlft_ent = kmem_zalloc(ft->mlft_entsize, KM_SLEEP);
1090 	list_create(&ft->mlft_groups, sizeof (mlxcx_flow_group_t),
1091 	    offsetof(mlxcx_flow_group_t, mlfg_entry));
1092 
1093 	for (j = 0; j < ft->mlft_nents; ++j) {
1094 		fe = &ft->mlft_ent[j];
1095 		fe->mlfe_table = ft;
1096 		fe->mlfe_index = j;
1097 		fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
1098 		fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow = g->mlg_rx_hash_ft;
1099 	}
1100 
1101 	if (!mlxcx_cmd_create_flow_table(mlxp, ft)) {
1102 		mutex_exit(&ft->mlft_mtx);
1103 		mutex_exit(&g->mlg_mtx);
1104 		return (B_FALSE);
1105 	}
1106 
1107 	/* First group is all actual matched VLANs */
1108 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
1109 	g->mlg_rx_vlan_fg = fg;
1110 	list_insert_tail(&ft->mlft_groups, fg);
1111 	fg->mlfg_table = ft;
1112 	fg->mlfg_size = ft->mlft_nents - 2;
1113 	fg->mlfg_mask |= MLXCX_FLOW_MATCH_VLAN;
1114 	fg->mlfg_mask |= MLXCX_FLOW_MATCH_VID;
1115 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
1116 		mutex_exit(&ft->mlft_mtx);
1117 		mutex_exit(&g->mlg_mtx);
1118 		return (B_FALSE);
1119 	}
1120 
1121 	/*
1122 	 * Then the "default" entry which we enable when we have no VLAN IDs
1123 	 * added to the group (we start with this enabled).
1124 	 */
1125 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
1126 	g->mlg_rx_vlan_def_fg = fg;
1127 	list_insert_tail(&ft->mlft_groups, fg);
1128 	fg->mlfg_table = ft;
1129 	fg->mlfg_size = 1;
1130 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
1131 		mutex_exit(&ft->mlft_mtx);
1132 		mutex_exit(&g->mlg_mtx);
1133 		return (B_FALSE);
1134 	}
1135 	fe = list_head(&fg->mlfg_entries);
1136 	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
1137 		mutex_exit(&ft->mlft_mtx);
1138 		mutex_exit(&g->mlg_mtx);
1139 		return (B_FALSE);
1140 	}
1141 
1142 	/*
1143 	 * Finally, the promisc entry which points at the *hash ft* from the
1144 	 * default group. We only enable this when we have promisc on.
1145 	 */
1146 	fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
1147 	g->mlg_rx_vlan_promisc_fg = fg;
1148 	list_insert_tail(&ft->mlft_groups, fg);
1149 	fg->mlfg_table = ft;
1150 	fg->mlfg_size = 1;
1151 	if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
1152 		mutex_exit(&ft->mlft_mtx);
1153 		mutex_exit(&g->mlg_mtx);
1154 		return (B_FALSE);
1155 	}
1156 	fe = list_head(&fg->mlfg_entries);
1157 	fe->mlfe_ndest = 1;
1158 	fe->mlfe_dest[0].mlfed_flow = mlxp->mlx_rx_groups[0].mlg_rx_hash_ft;
1159 
1160 	mutex_exit(&ft->mlft_mtx);
1161 
1162 	mutex_exit(&g->mlg_mtx);
1163 
1164 	return (B_TRUE);
1165 }
1166 
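/*
 * Start a single receive ring: make sure the group is running, start the RQ
 * in hardware, populate its buffer shard and push the first set of buffers
 * out to the hardware ring.
 */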
1167 boolean_t
1168 mlxcx_rx_ring_start(mlxcx_t *mlxp, mlxcx_ring_group_t *g,
1169     mlxcx_work_queue_t *rq)
1170 {
1171 	uint_t j;
1172 	mlxcx_buffer_t *b;
1173 	mlxcx_completion_queue_t *cq;
1174 
1175 	mutex_enter(&g->mlg_mtx);
1176 	/*
1177 	 * Sadly, even though MAC has the mgi_start callback, it is not always
1178 	 * called -- in particular when we are being managed under an aggr, the
1179 	 * mgi_start callback will only ever be called on the default group.
1180 	 *
1181 	 * So instead of asserting about the group state here, we have to
1182 	 * check it and call group start if needed.
1183 	 */
1184 	if (!(g->mlg_state & MLXCX_GROUP_RUNNING)) {
1185 		mutex_exit(&g->mlg_mtx);
1186 		if (!mlxcx_rx_group_start(mlxp, g))
1187 			return (B_FALSE);
1188 		mutex_enter(&g->mlg_mtx);
1189 	}
1190 	ASSERT(g->mlg_state & MLXCX_GROUP_RUNNING);
1191 
1192 	cq = rq->mlwq_cq;
1193 	ASSERT(cq != NULL);
1194 
1195 	mutex_enter(&cq->mlcq_mtx);
1196 	mutex_enter(&rq->mlwq_mtx);
1197 
1198 	if (rq->mlwq_state & MLXCX_WQ_STARTED) {
1199 		mutex_exit(&rq->mlwq_mtx);
1200 		mutex_exit(&cq->mlcq_mtx);
1201 		mutex_exit(&g->mlg_mtx);
1202 		return (B_TRUE);
1203 	}
1204 
1205 	if (!mlxcx_cmd_start_rq(mlxp, rq)) {
1206 		mutex_exit(&rq->mlwq_mtx);
1207 		mutex_exit(&cq->mlcq_mtx);
1208 		mutex_exit(&g->mlg_mtx);
1209 		return (B_FALSE);
1210 	}
1211 	ASSERT(rq->mlwq_state & MLXCX_WQ_STARTED);
1212 
1213 	ASSERT0(rq->mlwq_state & MLXCX_WQ_BUFFERS);
1214 	rq->mlwq_state |= MLXCX_WQ_BUFFERS;
1215 
1216 	for (j = 0; j < rq->mlwq_nents; ++j) {
1217 		if (!mlxcx_buf_create(mlxp, rq->mlwq_bufs, &b))
1218 			break;
1219 		mlxcx_buf_return(mlxp, b);
1220 	}
1221 	for (j = 0; j < rq->mlwq_nents / 2; ++j) {
1222 		if (!mlxcx_buf_create(mlxp, rq->mlwq_bufs, &b))
1223 			break;
1224 		mlxcx_buf_return(mlxp, b);
1225 	}
1226 
1227 	mlxcx_rq_refill(mlxp, rq);
1228 
1229 	mutex_exit(&rq->mlwq_mtx);
1230 	mutex_exit(&cq->mlcq_mtx);
1231 	mutex_exit(&g->mlg_mtx);
1232 
1233 	return (B_TRUE);
1234 }
1235 
1236 boolean_t
1237 mlxcx_rx_group_start(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
1238 {
1239 	mlxcx_flow_table_t *ft;
1240 	mlxcx_flow_group_t *fg;
1241 	mlxcx_flow_entry_t *fe;
1242 	char tq_name[TASKQ_NAMELEN];
1243 
1244 	mutex_enter(&g->mlg_mtx);
1245 
1246 	if (g->mlg_state & MLXCX_GROUP_RUNNING) {
1247 		mutex_exit(&g->mlg_mtx);
1248 		return (B_TRUE);
1249 	}
1250 
1251 	ASSERT0(g->mlg_state & MLXCX_GROUP_RUNNING);
1252 
1253 	g->mlg_state |= MLXCX_GROUP_RUNNING;
1254 
1255 	(void) snprintf(tq_name, sizeof (tq_name), "%s_refill_%d_%ld",
1256 	    ddi_driver_name(mlxp->mlx_dip), mlxp->mlx_inst,
1257 	    g - &mlxp->mlx_rx_groups[0]);
1258 
1259 	/*
1260 	 * Create one refill taskq per group with one thread per work queue.
1261 	 * The refill task may block waiting for resources, so by effectively
1262 	 * having one thread per work queue we avoid work queues blocking each
1263 	 * other.
1264 	 */
1265 	if ((g->mlg_refill_tq = taskq_create(tq_name, g->mlg_nwqs, minclsyspri,
1266 	    g->mlg_nwqs, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
1267 		mlxcx_warn(mlxp, "failed to create rq refill task queue");
1268 		mutex_exit(&g->mlg_mtx);
1269 		return (B_FALSE);
1270 	}
1271 
1272 	if (g == &mlxp->mlx_rx_groups[0]) {
1273 		ft = g->mlg_port->mlp_rx_flow;
1274 		mutex_enter(&ft->mlft_mtx);
1275 
1276 		/*
1277 		 * Broadcast and promisc entries go directly to group 0's
1278 		 * RSS hash fanout flow table. They bypass VLAN filtering.
1279 		 */
1280 		fg = g->mlg_port->mlp_bcast;
1281 		fe = list_head(&fg->mlfg_entries);
1282 		fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow = g->mlg_rx_hash_ft;
1283 		if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
1284 			mutex_exit(&ft->mlft_mtx);
1285 			g->mlg_state &= ~MLXCX_GROUP_RUNNING;
1286 			taskq_destroy(g->mlg_refill_tq);
1287 			mutex_exit(&g->mlg_mtx);
1288 			return (B_FALSE);
1289 		}
1290 
1291 		fg = g->mlg_port->mlp_promisc;
1292 		fe = list_head(&fg->mlfg_entries);
1293 		fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow = g->mlg_rx_hash_ft;
1294 		/*
1295 		 * Don't actually set the promisc entry until promisc is
1296 		 * enabled.
1297 		 */
1298 
1299 		mutex_exit(&ft->mlft_mtx);
1300 	}
1301 
1302 	mutex_exit(&g->mlg_mtx);
1303 
1304 	return (B_TRUE);
1305 }
1306 
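/*
 * Set up the tx group: create its TIS, then a send queue and completion
 * queue pair for each tx ring, spread across the tx event queues.
 */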
1307 boolean_t
1308 mlxcx_tx_group_setup(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
1309 {
1310 	mlxcx_event_queue_t *eq;
1311 	mlxcx_completion_queue_t *cq;
1312 	mlxcx_work_queue_t *sq;
1313 	uint_t i;
1314 
1315 	ASSERT3S(g->mlg_state, ==, 0);
1316 
1317 	mutex_init(&g->mlg_mtx, NULL, MUTEX_DRIVER,
1318 	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
1319 	g->mlg_state |= MLXCX_GROUP_INIT;
1320 	mutex_enter(&g->mlg_mtx);
1321 
1322 	g->mlg_mlx = mlxp;
1323 	g->mlg_type = MLXCX_GROUP_TX;
1324 	g->mlg_port = &mlxp->mlx_ports[0];
1325 
1326 	g->mlg_nwqs = mlxp->mlx_props.mldp_tx_nrings_per_group;
1327 	g->mlg_wqs_size = g->mlg_nwqs * sizeof (mlxcx_work_queue_t);
1328 	g->mlg_wqs = kmem_zalloc(g->mlg_wqs_size, KM_SLEEP);
1329 	g->mlg_state |= MLXCX_GROUP_WQS;
1330 
1331 	g->mlg_tis.mltis_tdom = &mlxp->mlx_tdom;
1332 
1333 	if (!mlxcx_cmd_create_tis(mlxp, &g->mlg_tis)) {
1334 		mutex_exit(&g->mlg_mtx);
1335 		return (B_FALSE);
1336 	}
1337 
1338 	g->mlg_state |= MLXCX_GROUP_TIRTIS;
1339 
1340 	for (i = 0; i < g->mlg_nwqs; ++i) {
1341 		eq = NULL;
1342 		while (eq == NULL) {
1343 			eq = &mlxp->mlx_eqs[mlxp->mlx_next_eq++];
1344 			if (mlxp->mlx_next_eq >= mlxp->mlx_intr_count)
1345 				mlxp->mlx_next_eq = 1;
1346 			if (eq->mleq_type != MLXCX_EQ_TYPE_ANY &&
1347 			    eq->mleq_type != MLXCX_EQ_TYPE_TX) {
1348 				/* Try the next one */
1349 				eq = NULL;
1350 			}
1351 		}
1352 
1353 		if (!mlxcx_cq_setup(mlxp, eq, &cq,
1354 		    mlxp->mlx_props.mldp_cq_size_shift))
1355 			return (B_FALSE);
1356 
1357 		cq->mlcq_stats = &g->mlg_port->mlp_stats;
1358 
1359 		sq = &g->mlg_wqs[i];
1360 		if (!mlxcx_sq_setup(mlxp, g->mlg_port, cq, &g->mlg_tis, sq)) {
1361 			mutex_exit(&g->mlg_mtx);
1362 			return (B_FALSE);
1363 		}
1364 		sq->mlwq_group = g;
1365 	}
1366 
1367 	mutex_exit(&g->mlg_mtx);
1368 
1369 	return (B_TRUE);
1370 }
1371 
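/*
 * Start a single send ring: populate its buffer shards (both foreign and
 * local), start the SQ in hardware and post an initial NOP WQE.
 */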
1372 boolean_t
1373 mlxcx_tx_ring_start(mlxcx_t *mlxp, mlxcx_ring_group_t *g,
1374     mlxcx_work_queue_t *sq)
1375 {
1376 	uint_t i;
1377 	mlxcx_buffer_t *b;
1378 	mlxcx_completion_queue_t *cq;
1379 
1380 	mutex_enter(&g->mlg_mtx);
1381 
1382 	cq = sq->mlwq_cq;
1383 	ASSERT(cq != NULL);
1384 
1385 	mutex_enter(&cq->mlcq_mtx);
1386 	mutex_enter(&sq->mlwq_mtx);
1387 	if (sq->mlwq_state & MLXCX_WQ_STARTED) {
1388 		mutex_exit(&sq->mlwq_mtx);
1389 		mutex_exit(&cq->mlcq_mtx);
1390 		mutex_exit(&g->mlg_mtx);
1391 		return (B_TRUE);
1392 	}
1393 
1394 	ASSERT0(sq->mlwq_state & MLXCX_WQ_BUFFERS);
1395 	for (i = 0; i < sq->mlwq_nents; ++i) {
1396 		if (!mlxcx_buf_create_foreign(mlxp, sq->mlwq_foreign_bufs, &b))
1397 			break;
1398 		mlxcx_buf_return(mlxp, b);
1399 	}
1400 	for (i = 0; i < sq->mlwq_nents / 2; ++i) {
1401 		if (!mlxcx_buf_create_foreign(mlxp, sq->mlwq_foreign_bufs, &b))
1402 			break;
1403 		mlxcx_buf_return(mlxp, b);
1404 	}
1405 	for (i = 0; i < sq->mlwq_nents; ++i) {
1406 		if (!mlxcx_buf_create(mlxp, sq->mlwq_bufs, &b))
1407 			break;
1408 		mlxcx_buf_return(mlxp, b);
1409 	}
1410 	sq->mlwq_state |= MLXCX_WQ_BUFFERS;
1411 
1412 	if (!mlxcx_cmd_start_sq(mlxp, sq)) {
1413 		mutex_exit(&sq->mlwq_mtx);
1414 		mutex_exit(&cq->mlcq_mtx);
1415 		mutex_exit(&g->mlg_mtx);
1416 		return (B_FALSE);
1417 	}
1418 	g->mlg_state |= MLXCX_GROUP_RUNNING;
1419 
1420 	(void) mlxcx_sq_add_nop(mlxp, sq);
1421 
1422 	mutex_exit(&sq->mlwq_mtx);
1423 	mutex_exit(&cq->mlcq_mtx);
1424 	mutex_exit(&g->mlg_mtx);
1425 
1426 	return (B_TRUE);
1427 }
1428 
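/*
 * Ring the doorbell for a send queue: update the doorbell record with the
 * current producer counter, then write the first new WQE's control qword to
 * the UAR blue flame register for this EQ. Both the DMA sync and the
 * register write are checked (and retried) through FMA.
 */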
1429 static boolean_t
1430 mlxcx_sq_ring_dbell(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq, uint_t first)
1431 {
1432 	uint_t idx;
1433 	mlxcx_bf_t *bf;
1434 	ddi_fm_error_t err;
1435 	uint_t try = 0;
1436 
1437 	ASSERT3U(mlwq->mlwq_type, ==, MLXCX_WQ_TYPE_SENDQ);
1438 	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
1439 
1440 	mlwq->mlwq_doorbell->mlwqd_send_counter = to_be16(mlwq->mlwq_pc);
1441 
1442 	ASSERT(mlwq->mlwq_cq != NULL);
1443 	ASSERT(mlwq->mlwq_cq->mlcq_eq != NULL);
1444 	idx = mlwq->mlwq_cq->mlcq_eq->mleq_intr_index & MLXCX_BF_PER_UAR_MASK;
1445 	bf = &mlwq->mlwq_uar->mlu_bf[idx];
1446 
1447 retry:
1448 	MLXCX_DMA_SYNC(mlwq->mlwq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
1449 	ddi_fm_dma_err_get(mlwq->mlwq_doorbell_dma.mxdb_dma_handle, &err,
1450 	    DDI_FME_VERSION);
1451 	if (err.fme_status != DDI_FM_OK) {
1452 		if (try++ < mlxcx_doorbell_tries) {
1453 			ddi_fm_dma_err_clear(
1454 			    mlwq->mlwq_doorbell_dma.mxdb_dma_handle,
1455 			    DDI_FME_VERSION);
1456 			goto retry;
1457 		} else {
1458 			goto err;
1459 		}
1460 	}
1461 
1462 	mlxcx_put64(mlxp, bf->mbf_even, from_be64(
1463 	    mlwq->mlwq_bf_ent[first].mlsqbf_qwords[0]));
1464 	ddi_fm_acc_err_get(mlxp->mlx_regs_handle, &err,
1465 	    DDI_FME_VERSION);
1466 	if (err.fme_status == DDI_FM_OK)
1467 		return (B_TRUE);
1468 	if (try++ < mlxcx_doorbell_tries) {
1469 		ddi_fm_acc_err_clear(mlxp->mlx_regs_handle, DDI_FME_VERSION);
1470 		goto retry;
1471 	}
1472 
1473 err:
1474 	ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_LOST);
1475 	return (B_FALSE);
1476 }
1477 
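/*
 * Post a NOP WQE to a send queue and ring the doorbell. This is done when a
 * send queue is first started (see mlxcx_tx_ring_start()).
 */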
1478 boolean_t
1479 mlxcx_sq_add_nop(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
1480 {
1481 	uint_t index, start_pc;
1482 	mlxcx_sendq_ent_t *ent0;
1483 	ddi_fm_error_t err;
1484 
1485 	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
1486 
1487 	index = mlwq->mlwq_pc & (mlwq->mlwq_nents - 1);
1488 	ent0 = &mlwq->mlwq_send_ent[index];
1489 	start_pc = mlwq->mlwq_pc;
1490 	++mlwq->mlwq_pc;
1491 	/*
1492 	 * This counter is manipulated in the interrupt handler, which
1493 	 * does not hold the mlwq_mtx, hence the atomic.
1494 	 */
1495 	atomic_inc_64(&mlwq->mlwq_wqebb_used);
1496 
1497 	bzero(ent0, sizeof (mlxcx_sendq_ent_t));
1498 	ent0->mlsqe_control.mlcs_opcode = MLXCX_WQE_OP_NOP;
1499 	ent0->mlsqe_control.mlcs_qp_or_sq = to_be24(mlwq->mlwq_num);
1500 	ent0->mlsqe_control.mlcs_wqe_index = to_be16(start_pc);
1501 
1502 	set_bits8(&ent0->mlsqe_control.mlcs_flags,
1503 	    MLXCX_SQE_FENCE_MODE, MLXCX_SQE_FENCE_NONE);
1504 	set_bits8(&ent0->mlsqe_control.mlcs_flags,
1505 	    MLXCX_SQE_COMPLETION_MODE, MLXCX_SQE_CQE_ALWAYS);
1506 
1507 	ent0->mlsqe_control.mlcs_ds = 1;
1508 
1509 	VERIFY0(ddi_dma_sync(mlwq->mlwq_dma.mxdb_dma_handle,
1510 	    (uintptr_t)ent0 - (uintptr_t)mlwq->mlwq_send_ent,
1511 	    sizeof (mlxcx_sendq_ent_t), DDI_DMA_SYNC_FORDEV));
1512 	ddi_fm_dma_err_get(mlwq->mlwq_dma.mxdb_dma_handle, &err,
1513 	    DDI_FME_VERSION);
1514 	if (err.fme_status != DDI_FM_OK) {
1515 		return (B_FALSE);
1516 	}
1517 	if (!mlxcx_sq_ring_dbell(mlxp, mlwq, index)) {
1518 		return (B_FALSE);
1519 	}
1520 	return (B_TRUE);
1521 }
1522 
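/*
 * Build and post a SEND WQE for a buffer chain: fill in the control and
 * ethernet segments (including any inline headers and checksum offload
 * flags), then one data pointer per DMA cookie, spilling over into extra
 * WQEBBs as needed. Unused pointers are stubbed with the null lkey, the
 * entries are synced out (in two pieces if the ring wrapped), and the
 * doorbell is rung with the buffer held on the CQ's buffer list so it can
 * be unwound if the doorbell write fails.
 */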
1523 boolean_t
1524 mlxcx_sq_add_buffer(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq,
1525     uint8_t *inlinehdrs, size_t inlinelen, uint32_t chkflags,
1526     mlxcx_buffer_t *b0)
1527 {
1528 	uint_t index, first, ents;
1529 	mlxcx_completion_queue_t *cq;
1530 	mlxcx_sendq_ent_t *ent0;
1531 	mlxcx_sendq_extra_ent_t *ent;
1532 	mlxcx_wqe_data_seg_t *seg;
1533 	uint_t ptri, nptr;
1534 	const ddi_dma_cookie_t *c;
1535 	size_t rem;
1536 	uint64_t wqebb_used;
1537 	mlxcx_buffer_t *b;
1538 	ddi_fm_error_t err;
1539 	boolean_t rv;
1540 
1541 	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
1542 	ASSERT3P(b0->mlb_tx_head, ==, b0);
1543 	ASSERT3U(b0->mlb_state, ==, MLXCX_BUFFER_ON_WQ);
1544 	cq = mlwq->mlwq_cq;
1545 
1546 	index = mlwq->mlwq_pc & (mlwq->mlwq_nents - 1);
1547 	ent0 = &mlwq->mlwq_send_ent[index];
1548 	b0->mlb_wqe_index = mlwq->mlwq_pc;
1549 	ents = 1;
1550 
1551 	first = index;
1552 
1553 	bzero(ent0, sizeof (mlxcx_sendq_ent_t));
1554 	ent0->mlsqe_control.mlcs_opcode = MLXCX_WQE_OP_SEND;
1555 	ent0->mlsqe_control.mlcs_qp_or_sq = to_be24(mlwq->mlwq_num);
1556 	ent0->mlsqe_control.mlcs_wqe_index = to_be16(b0->mlb_wqe_index);
1557 
1558 	set_bits8(&ent0->mlsqe_control.mlcs_flags,
1559 	    MLXCX_SQE_FENCE_MODE, MLXCX_SQE_FENCE_WAIT_OTHERS);
1560 	set_bits8(&ent0->mlsqe_control.mlcs_flags,
1561 	    MLXCX_SQE_COMPLETION_MODE, MLXCX_SQE_CQE_ALWAYS);
1562 
1563 	VERIFY3U(inlinelen, <=, sizeof (ent0->mlsqe_eth.mles_inline_headers));
1564 	set_bits16(&ent0->mlsqe_eth.mles_szflags,
1565 	    MLXCX_SQE_ETH_INLINE_HDR_SZ, inlinelen);
1566 	if (inlinelen > 0) {
1567 		bcopy(inlinehdrs, ent0->mlsqe_eth.mles_inline_headers,
1568 		    inlinelen);
1569 	}
1570 
1571 	ent0->mlsqe_control.mlcs_ds = offsetof(mlxcx_sendq_ent_t, mlsqe_data) /
1572 	    MLXCX_WQE_OCTOWORD;
1573 
1574 	if (chkflags & HCK_IPV4_HDRCKSUM) {
1575 		ASSERT(mlxp->mlx_caps->mlc_checksum);
1576 		set_bit8(&ent0->mlsqe_eth.mles_csflags,
1577 		    MLXCX_SQE_ETH_CSFLAG_L3_CHECKSUM);
1578 	}
1579 	if (chkflags & HCK_FULLCKSUM) {
1580 		ASSERT(mlxp->mlx_caps->mlc_checksum);
1581 		set_bit8(&ent0->mlsqe_eth.mles_csflags,
1582 		    MLXCX_SQE_ETH_CSFLAG_L4_CHECKSUM);
1583 	}
1584 
1585 	/*
1586 	 * mlwq_wqebb_used is only incremented whilst holding
1587 	 * the mlwq_mtx mutex, but it is decremented (atomically) in
1588 	 * the interrupt context, *not* under the mlwq_mtx mutex.
1589 	 * So, take a snapshot now of the number of used WQEBBs, which will
1590 	 * be a consistent maximum we can use whilst iterating through
1591 	 * the buffers and DMA cookies.
1592 	 */
1593 	wqebb_used = mlwq->mlwq_wqebb_used;
1594 
1595 	b = b0;
1596 	ptri = 0;
1597 	nptr = sizeof (ent0->mlsqe_data) / sizeof (mlxcx_wqe_data_seg_t);
1598 	seg = ent0->mlsqe_data;
1599 	while (b != NULL) {
1600 		rem = b->mlb_used;
1601 
1602 		c = NULL;
1603 		while (rem > 0 &&
1604 		    (c = mlxcx_dma_cookie_iter(&b->mlb_dma, c)) != NULL) {
1605 			if (ptri >= nptr) {
1606 				if ((ents + wqebb_used) >= mlwq->mlwq_nents)
1607 					return (B_FALSE);
1608 
1609 				index = (mlwq->mlwq_pc + ents) &
1610 				    (mlwq->mlwq_nents - 1);
1611 				ent = &mlwq->mlwq_send_extra_ent[index];
1612 				++ents;
1613 
1614 				seg = ent->mlsqe_data;
1615 				ptri = 0;
1616 				nptr = sizeof (ent->mlsqe_data) /
1617 				    sizeof (mlxcx_wqe_data_seg_t);
1618 			}
1619 
1620 			seg->mlds_lkey = to_be32(mlxp->mlx_rsvd_lkey);
1621 			if (c->dmac_size > rem) {
1622 				seg->mlds_byte_count = to_be32(rem);
1623 				rem = 0;
1624 			} else {
1625 				seg->mlds_byte_count = to_be32(c->dmac_size);
1626 				rem -= c->dmac_size;
1627 			}
1628 			seg->mlds_address = to_be64(c->dmac_laddress);
1629 			++seg;
1630 			++ptri;
1631 			++ent0->mlsqe_control.mlcs_ds;
1632 
1633 			ASSERT3U(ent0->mlsqe_control.mlcs_ds, <=,
1634 			    MLXCX_SQE_MAX_DS);
1635 		}
1636 
1637 		if (b == b0) {
1638 			b = list_head(&b0->mlb_tx_chain);
1639 		} else {
1640 			b = list_next(&b0->mlb_tx_chain, b);
1641 		}
1642 	}
1643 
1644 	b0->mlb_wqebbs = ents;
1645 	mlwq->mlwq_pc += ents;
1646 	atomic_add_64(&mlwq->mlwq_wqebb_used, ents);
1647 
1648 	for (; ptri < nptr; ++ptri, ++seg) {
1649 		seg->mlds_lkey = to_be32(MLXCX_NULL_LKEY);
1650 		seg->mlds_byte_count = to_be32(0);
1651 		seg->mlds_address = to_be64(0);
1652 	}
1653 
1654 	/*
1655 	 * Make sure the workqueue entry is flushed out before updating
1656 	 * the doorbell.
1657 	 * If the ring has wrapped, we need to flush the front and back.
1658 	 */
1659 	if ((first + ents) > mlwq->mlwq_nents) {
1660 		uint_t sync_cnt = mlwq->mlwq_nents - first;
1661 
1662 		VERIFY0(ddi_dma_sync(mlwq->mlwq_dma.mxdb_dma_handle,
1663 		    (uintptr_t)ent0 - (uintptr_t)mlwq->mlwq_send_ent,
1664 		    sync_cnt * sizeof (mlxcx_sendq_ent_t),
1665 		    DDI_DMA_SYNC_FORDEV));
1666 
1667 		ent0 = &mlwq->mlwq_send_ent[0];
1668 		ents -= sync_cnt;
1669 	}
1670 
1671 	VERIFY0(ddi_dma_sync(mlwq->mlwq_dma.mxdb_dma_handle,
1672 	    (uintptr_t)ent0 - (uintptr_t)mlwq->mlwq_send_ent,
1673 	    ents * sizeof (mlxcx_sendq_ent_t), DDI_DMA_SYNC_FORDEV));
1674 	ddi_fm_dma_err_get(mlwq->mlwq_dma.mxdb_dma_handle, &err,
1675 	    DDI_FME_VERSION);
1676 	if (err.fme_status != DDI_FM_OK) {
1677 		return (B_FALSE);
1678 	}
1679 
1680 	/*
1681 	 * Hold the bufmtx whilst ringing the doorbell, to prevent
1682 	 * the buffer from being moved to another list, so we can
1683 	 * safely remove it should the ring fail.
1684 	 */
1685 	mutex_enter(&cq->mlcq_bufbmtx);
1686 
1687 	list_insert_tail(&cq->mlcq_buffers_b, b0);
1688 	if ((rv = mlxcx_sq_ring_dbell(mlxp, mlwq, first))) {
1689 		atomic_inc_64(&cq->mlcq_bufcnt);
1690 	} else {
1691 		list_remove(&cq->mlcq_buffers_b, b0);
1692 	}
1693 
1694 	mutex_exit(&cq->mlcq_bufbmtx);
1695 
1696 	return (rv);
1697 }
1698 
1699 boolean_t
1700 mlxcx_rq_add_buffer(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq,
1701     mlxcx_buffer_t *buf)
1702 {
1703 	return (mlxcx_rq_add_buffers(mlxp, mlwq, &buf, 1));
1704 }
1705 
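/*
 * Post a batch of receive buffers to an RQ. Each buffer consumes one WQE,
 * with one scatter pointer per DMA cookie and the remainder stubbed with
 * the null lkey. The buffers are moved onto the CQ's buffer list as they
 * are posted, and the receive doorbell (and the CQ doorbell) are flushed at
 * the end.
 */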
1706 boolean_t
1707 mlxcx_rq_add_buffers(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq,
1708     mlxcx_buffer_t **bufs, size_t nbufs)
1709 {
1710 	uint_t index;
1711 	mlxcx_recvq_ent_t *ent;
1712 	mlxcx_completion_queue_t *cq;
1713 	mlxcx_wqe_data_seg_t *seg;
1714 	uint_t bi, ptri;
1715 	const ddi_dma_cookie_t *c;
1716 	mlxcx_buffer_t *buf;
1717 	ddi_fm_error_t err;
1718 
1719 	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
1720 	cq = mlwq->mlwq_cq;
1721 	ASSERT(mutex_owned(&cq->mlcq_mtx));
1722 
1723 	for (bi = 0; bi < nbufs; ++bi) {
1724 		buf = bufs[bi];
1725 		bufs[bi] = NULL;
1726 		ASSERT3U(buf->mlb_state, ==, MLXCX_BUFFER_ON_WQ);
1727 
1728 		index = mlwq->mlwq_pc & (mlwq->mlwq_nents - 1);
1729 		ent = &mlwq->mlwq_recv_ent[index];
1730 		buf->mlb_wqe_index = mlwq->mlwq_pc;
1731 		buf->mlb_wqebbs = 1;
1732 
1733 		++mlwq->mlwq_pc;
1734 		atomic_inc_64(&mlwq->mlwq_wqebb_used);
1735 
1736 		mutex_enter(&cq->mlcq_bufbmtx);
1737 		list_insert_tail(&cq->mlcq_buffers, buf);
1738 		atomic_inc_64(&cq->mlcq_bufcnt);
1739 		mutex_exit(&cq->mlcq_bufbmtx);
1740 
1741 		ASSERT3U(buf->mlb_dma.mxdb_ncookies, <=, MLXCX_RECVQ_MAX_PTRS);
1742 		ptri = 0;
1743 		c = NULL;
1744 		while ((c = mlxcx_dma_cookie_iter(&buf->mlb_dma, c)) != NULL) {
1745 			seg = &ent->mlrqe_data[ptri++];
1746 			seg->mlds_lkey = to_be32(mlxp->mlx_rsvd_lkey);
1747 			seg->mlds_byte_count = to_be32(c->dmac_size);
1748 			seg->mlds_address = to_be64(c->dmac_laddress);
1749 		}
1750 		/*
1751 		 * Fill any unused scatter pointers with the special null
1752 		 * value.
1753 		 */
1754 		for (; ptri < MLXCX_RECVQ_MAX_PTRS; ++ptri) {
1755 			seg = &ent->mlrqe_data[ptri];
1756 			seg->mlds_lkey = to_be32(MLXCX_NULL_LKEY);
1757 			seg->mlds_byte_count = to_be32(0);
1758 			seg->mlds_address = to_be64(0);
1759 		}
1760 
1761 		/*
1762 		 * Make sure the workqueue entry is flushed out before updating
1763 		 * the doorbell.
1764 		 */
1765 		VERIFY0(ddi_dma_sync(mlwq->mlwq_dma.mxdb_dma_handle,
1766 		    (uintptr_t)ent - (uintptr_t)mlwq->mlwq_recv_ent,
1767 		    sizeof (mlxcx_recvq_ent_t), DDI_DMA_SYNC_FORDEV));
1768 		ddi_fm_dma_err_get(mlwq->mlwq_dma.mxdb_dma_handle, &err,
1769 		    DDI_FME_VERSION);
1770 		if (err.fme_status != DDI_FM_OK) {
1771 			return (B_FALSE);
1772 		}
1773 	}
1774 
1775 	mlwq->mlwq_doorbell->mlwqd_recv_counter = to_be16(mlwq->mlwq_pc);
1776 	/*
1777 	 * Flush the CQ doorbell as well so that HW knows how many
1778 	 * completions we've consumed.
1779 	 */
1780 	MLXCX_DMA_SYNC(cq->mlcq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
1781 	ddi_fm_dma_err_get(cq->mlcq_doorbell_dma.mxdb_dma_handle, &err,
1782 	    DDI_FME_VERSION);
1783 	if (err.fme_status != DDI_FM_OK) {
1784 		return (B_FALSE);
1785 	}
1786 	MLXCX_DMA_SYNC(mlwq->mlwq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
1787 	ddi_fm_dma_err_get(mlwq->mlwq_doorbell_dma.mxdb_dma_handle, &err,
1788 	    DDI_FME_VERSION);
1789 	if (err.fme_status != DDI_FM_OK) {
1790 		return (B_FALSE);
1791 	}
1792 	return (B_TRUE);
1793 }
1794 
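/*
 * Taskq entry point used when mlxcx_rq_refill() runs out of free buffers:
 * wait for buffers to be returned to the shard and keep refilling until the
 * queue has caught back up or the cq enters teardown.
 */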
1795 static void
1796 mlxcx_rq_refill_task(void *arg)
1797 {
1798 	mlxcx_work_queue_t *wq = arg;
1799 	mlxcx_completion_queue_t *cq = wq->mlwq_cq;
1800 	mlxcx_t *mlxp = wq->mlwq_mlx;
1801 	mlxcx_buf_shard_t *s = wq->mlwq_bufs;
1802 	boolean_t refill;
1803 
1804 	do {
1805 		/*
1806 		 * Wait until there are some free buffers.
1807 		 */
1808 		mutex_enter(&s->mlbs_mtx);
1809 		while (list_is_empty(&s->mlbs_free) &&
1810 		    (cq->mlcq_state & MLXCX_CQ_TEARDOWN) == 0)
1811 			cv_wait(&s->mlbs_free_nonempty, &s->mlbs_mtx);
1812 		mutex_exit(&s->mlbs_mtx);
1813 
1814 		mutex_enter(&cq->mlcq_mtx);
1815 		mutex_enter(&wq->mlwq_mtx);
1816 
1817 		if ((cq->mlcq_state & MLXCX_CQ_TEARDOWN) != 0) {
1818 			refill = B_FALSE;
1819 			wq->mlwq_state &= ~MLXCX_WQ_REFILLING;
1820 		} else {
1821 			mlxcx_rq_refill(mlxp, wq);
1822 
1823 			if (cq->mlcq_bufcnt < MLXCX_RQ_REFILL_STEP) {
1824 				refill = B_TRUE;
1825 			} else {
1826 				refill = B_FALSE;
1827 				wq->mlwq_state &= ~MLXCX_WQ_REFILLING;
1828 			}
1829 		}
1830 
1831 		mutex_exit(&wq->mlwq_mtx);
1832 		mutex_exit(&cq->mlcq_mtx);
1833 	} while (refill);
1834 }
1835 
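/*
 * Post receive buffers onto a receive work queue until it is close to full.
 * Buffers are taken from the queue's shard in batches of
 * MLXCX_RQ_REFILL_STEP; if none are available and the CQ is running dry,
 * the refill taskq is dispatched to finish the job once buffers are
 * returned to the shard.
 */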
1836 void
1837 mlxcx_rq_refill(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
1838 {
1839 	size_t target, current, want, done, n;
1840 	mlxcx_completion_queue_t *cq;
1841 	mlxcx_ring_group_t *g;
1842 	mlxcx_buffer_t *b[MLXCX_RQ_REFILL_STEP];
1843 	uint_t i;
1844 
1845 	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
1846 	cq = mlwq->mlwq_cq;
1847 	ASSERT(mutex_owned(&cq->mlcq_mtx));
1848 
1849 	ASSERT(mlwq->mlwq_state & MLXCX_WQ_BUFFERS);
1850 
1851 	target = mlwq->mlwq_nents - MLXCX_RQ_REFILL_STEP;
1852 	cq = mlwq->mlwq_cq;
1853 
1854 	if (cq->mlcq_state & MLXCX_CQ_TEARDOWN)
1855 		return;
1856 
1857 	current = cq->mlcq_bufcnt;
1858 
1859 	if (current >= target - MLXCX_RQ_REFILL_STEP)
1860 		return;
1861 
1862 	want = target - current;
1863 	done = 0;
1864 
1865 	while (!(mlwq->mlwq_state & MLXCX_WQ_TEARDOWN) && done < want) {
1866 		n = mlxcx_buf_take_n(mlxp, mlwq, b, MLXCX_RQ_REFILL_STEP);
1867 		if (n == 0) {
1868 			/*
1869 			 * We didn't get any buffers from the free queue.
1870 			 * That might not be a problem, but if the
1871 			 * completion queue is running low, dispatch a
1872 			 * taskq entry to wait for free buffers.
1873 			 */
1874 			if (current < MLXCX_RQ_REFILL_STEP &&
1875 			    (mlwq->mlwq_state & MLXCX_WQ_REFILLING) == 0) {
1876 				mlwq->mlwq_state |= MLXCX_WQ_REFILLING;
1877 				g = mlwq->mlwq_group;
1878 				taskq_dispatch_ent(g->mlg_refill_tq,
1879 				    mlxcx_rq_refill_task, mlwq, TQ_NOSLEEP,
1880 				    &mlwq->mlwq_tqe);
1881 			}
1882 
1883 			return;
1884 		}
1885 
1886 		if (mlwq->mlwq_state & MLXCX_WQ_TEARDOWN) {
1887 			for (i = 0; i < n; ++i)
1888 				mlxcx_buf_return(mlxp, b[i]);
1889 			return;
1890 		}
1891 		if (!mlxcx_rq_add_buffers(mlxp, mlwq, b, n)) {
1892 			/*
1893 			 * mlxcx_rq_add_buffers() NULLs out the buffers as it
1894 			 * enqueues them, so any that are still non-NULL must
1895 			 * be returned here. The rest now belong to the WQ,
1896 			 * even though the call failed.
1897 			 */
1898 			for (i = 0; i < n; ++i) {
1899 				if (b[i] != NULL) {
1900 					mlxcx_buf_return(mlxp, b[i]);
1901 				}
1902 			}
1903 			return;
1904 		}
1905 		done += n;
1906 	}
1907 }
1908 
1909 static const char *
1910 mlxcx_cq_err_syndrome_string(mlxcx_cq_error_syndrome_t sy)
1911 {
1912 	switch (sy) {
1913 	case MLXCX_CQ_ERR_LOCAL_LENGTH:
1914 		return ("LOCAL_LENGTH");
1915 	case MLXCX_CQ_ERR_LOCAL_QP_OP:
1916 		return ("LOCAL_QP_OP");
1917 	case MLXCX_CQ_ERR_LOCAL_PROTECTION:
1918 		return ("LOCAL_PROTECTION");
1919 	case MLXCX_CQ_ERR_WR_FLUSHED:
1920 		return ("WR_FLUSHED");
1921 	case MLXCX_CQ_ERR_MEM_WINDOW_BIND:
1922 		return ("MEM_WINDOW_BIND");
1923 	case MLXCX_CQ_ERR_BAD_RESPONSE:
1924 		return ("BAD_RESPONSE");
1925 	case MLXCX_CQ_ERR_LOCAL_ACCESS:
1926 		return ("LOCAL_ACCESS");
1927 	case MLXCX_CQ_ERR_XPORT_RETRY_CTR:
1928 		return ("XPORT_RETRY_CTR");
1929 	case MLXCX_CQ_ERR_RNR_RETRY_CTR:
1930 		return ("RNR_RETRY_CTR");
1931 	case MLXCX_CQ_ERR_ABORTED:
1932 		return ("ABORTED");
1933 	default:
1934 		return ("UNKNOWN");
1935 	}
1936 }
1937 
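/*
 * Post an FMA ereport describing an error completion queue entry, including
 * its syndrome, the vendor syndrome and which CQ/WQ it was seen on, and
 * mark the device's service state as degraded.
 */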
1938 static void
1939 mlxcx_fm_cqe_ereport(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq,
1940     mlxcx_completionq_error_ent_t *ent)
1941 {
1942 	uint64_t ena;
1943 	char buf[FM_MAX_CLASS];
1944 	const char *name = mlxcx_cq_err_syndrome_string(ent->mlcqee_syndrome);
1945 
1946 	if (!DDI_FM_EREPORT_CAP(mlxp->mlx_fm_caps))
1947 		return;
1948 
1949 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1950 	    MLXCX_FM_SERVICE_MLXCX, "cqe.err");
1951 	ena = fm_ena_generate(0, FM_ENA_FMT1);
1952 
1953 	ddi_fm_ereport_post(mlxp->mlx_dip, buf, ena, DDI_NOSLEEP,
1954 	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
1955 	    "syndrome", DATA_TYPE_STRING, name,
1956 	    "syndrome_num", DATA_TYPE_UINT8, ent->mlcqee_syndrome,
1957 	    "vendor_syndrome", DATA_TYPE_UINT8,
1958 	    ent->mlcqee_vendor_error_syndrome,
1959 	    "wqe_counter", DATA_TYPE_UINT16, from_be16(ent->mlcqee_wqe_counter),
1960 	    "wq_type", DATA_TYPE_STRING,
1961 	    (mlcq->mlcq_wq->mlwq_type == MLXCX_WQ_TYPE_SENDQ) ? "send": "recv",
1962 	    "cq_num", DATA_TYPE_UINT32, mlcq->mlcq_num,
1963 	    "wq_num", DATA_TYPE_UINT32, mlcq->mlcq_wq->mlwq_num,
1964 	    NULL);
1965 	ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_DEGRADED);
1966 }
1967 
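/*
 * Handle a completion event for a send queue entry. Error and unexpected
 * completions are reported (and the SQ checked after an error), but in
 * every case the buffer chain that backed the WQE is returned to its shard.
 */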
1968 void
1969 mlxcx_tx_completion(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq,
1970     mlxcx_completionq_ent_t *ent, mlxcx_buffer_t *buf)
1971 {
1972 	ASSERT(mutex_owned(&mlcq->mlcq_mtx));
1973 	if (ent->mlcqe_opcode == MLXCX_CQE_OP_REQ_ERR) {
1974 		mlxcx_completionq_error_ent_t *eent =
1975 		    (mlxcx_completionq_error_ent_t *)ent;
1976 		mlxcx_fm_cqe_ereport(mlxp, mlcq, eent);
1977 		mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
1978 		mutex_enter(&mlcq->mlcq_wq->mlwq_mtx);
1979 		mlxcx_check_sq(mlxp, mlcq->mlcq_wq);
1980 		mutex_exit(&mlcq->mlcq_wq->mlwq_mtx);
1981 		return;
1982 	}
1983 
1984 	if (ent->mlcqe_opcode != MLXCX_CQE_OP_REQ) {
1985 		mlxcx_warn(mlxp, "!got weird cq opcode: %x", ent->mlcqe_opcode);
1986 		mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
1987 		return;
1988 	}
1989 
1990 	if (ent->mlcqe_send_wqe_opcode != MLXCX_WQE_OP_SEND) {
1991 		mlxcx_warn(mlxp, "!got weird cq wqe opcode: %x",
1992 		    ent->mlcqe_send_wqe_opcode);
1993 		mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
1994 		return;
1995 	}
1996 
1997 	if (ent->mlcqe_format != MLXCX_CQE_FORMAT_BASIC) {
1998 		mlxcx_warn(mlxp, "!got weird cq format: %x", ent->mlcqe_format);
1999 		mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
2000 		return;
2001 	}
2002 
2003 	mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
2004 }
2005 
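/*
 * Handle a completion event for a receive queue entry. On success the
 * buffer is loaned out to MAC and its mblk handed back to the caller for
 * delivery; on any error the buffer goes straight back to its shard and
 * NULL is returned. Roughly one in every eight completions (based on the
 * WQE index) we also take the opportunity to top the RQ back up with
 * fresh buffers.
 */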
2006 mblk_t *
2007 mlxcx_rx_completion(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq,
2008     mlxcx_completionq_ent_t *ent, mlxcx_buffer_t *buf)
2009 {
2010 	uint32_t chkflags = 0;
2011 	uint_t wqe_index;
2012 	ddi_fm_error_t err;
2013 
2014 	ASSERT(mutex_owned(&mlcq->mlcq_mtx));
2015 
2016 	if (ent->mlcqe_opcode == MLXCX_CQE_OP_RESP_ERR) {
2017 		mlxcx_completionq_error_ent_t *eent =
2018 		    (mlxcx_completionq_error_ent_t *)ent;
2019 		mlxcx_fm_cqe_ereport(mlxp, mlcq, eent);
2020 		mlxcx_buf_return(mlxp, buf);
2021 		mutex_enter(&mlcq->mlcq_wq->mlwq_mtx);
2022 		mlxcx_check_rq(mlxp, mlcq->mlcq_wq);
2023 		mutex_exit(&mlcq->mlcq_wq->mlwq_mtx);
2024 		return (NULL);
2025 	}
2026 
2027 	if (ent->mlcqe_opcode != MLXCX_CQE_OP_RESP) {
2028 		mlxcx_warn(mlxp, "!got weird cq opcode: %x", ent->mlcqe_opcode);
2029 		mlxcx_buf_return(mlxp, buf);
2030 		return (NULL);
2031 	}
2032 
2033 	if (ent->mlcqe_format != MLXCX_CQE_FORMAT_BASIC) {
2034 		mlxcx_warn(mlxp, "!got weird cq format: %x", ent->mlcqe_format);
2035 		mlxcx_buf_return(mlxp, buf);
2036 		return (NULL);
2037 	}
2038 
2039 	if (ent->mlcqe_rx_drop_counter > 0) {
2040 		atomic_add_64(&mlcq->mlcq_stats->mlps_rx_drops,
2041 		    ent->mlcqe_rx_drop_counter);
2042 	}
2043 
2044 	MLXCX_DMA_SYNC(buf->mlb_dma, DDI_DMA_SYNC_FORCPU);
2045 	ddi_fm_dma_err_get(buf->mlb_dma.mxdb_dma_handle, &err,
2046 	    DDI_FME_VERSION);
2047 	if (err.fme_status != DDI_FM_OK) {
2048 		ddi_fm_dma_err_clear(buf->mlb_dma.mxdb_dma_handle,
2049 		    DDI_FME_VERSION);
2050 		mlxcx_buf_return(mlxp, buf);
2051 		return (NULL);
2052 	}
2053 
2054 	/*
2055 	 * mlxcx_buf_loan() will reset mlb_wqe_index to zero, so save
2056 	 * the current value for the refill check further down.
2057 	 */
2058 	wqe_index = buf->mlb_wqe_index;
2059 
2060 	if (!mlxcx_buf_loan(mlxp, buf)) {
2061 		mlxcx_warn(mlxp, "!loan failed, dropping packet");
2062 		mlxcx_buf_return(mlxp, buf);
2063 		return (NULL);
2064 	}
2065 
2066 	buf->mlb_mp->b_next = NULL;
2067 	buf->mlb_mp->b_cont = NULL;
2068 	buf->mlb_mp->b_wptr = buf->mlb_mp->b_rptr +
2069 	    from_be32(ent->mlcqe_byte_cnt);
2070 
2071 	if (get_bit8(ent->mlcqe_csflags, MLXCX_CQE_CSFLAGS_L4_OK)) {
2072 		chkflags |= HCK_FULLCKSUM_OK;
2073 	}
2074 	if (get_bit8(ent->mlcqe_csflags, MLXCX_CQE_CSFLAGS_L3_OK)) {
2075 		chkflags |= HCK_IPV4_HDRCKSUM_OK;
2076 	}
2077 	if (chkflags != 0) {
2078 		mac_hcksum_set(buf->mlb_mp, 0, 0, 0,
2079 		    from_be16(ent->mlcqe_checksum), chkflags);
2080 	}
2081 
2082 	/*
2083 	 * Don't check if a refill is needed on every single completion,
2084 	 * since checking involves taking the RQ lock.
2085 	 */
2086 	if ((wqe_index & 0x7) == 0) {
2087 		mlxcx_work_queue_t *wq = mlcq->mlcq_wq;
2088 		ASSERT(wq != NULL);
2089 		mutex_enter(&wq->mlwq_mtx);
2090 		if (!(wq->mlwq_state & MLXCX_WQ_TEARDOWN))
2091 			mlxcx_rq_refill(mlxp, wq);
2092 		mutex_exit(&wq->mlwq_mtx);
2093 	}
2094 
2095 	return (buf->mlb_mp);
2096 }
2097 
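/*
 * desballoc() free function for the mblks which wrap our DMA buffers. When
 * the stack frees a loaned mblk, this puts the underlying buffer back on
 * its shard; if the buffer was not on loan, only the now-stale mblk
 * pointer is dropped.
 */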
2098 static void
2099 mlxcx_buf_mp_return(caddr_t arg)
2100 {
2101 	mlxcx_buffer_t *b = (mlxcx_buffer_t *)arg;
2102 	mlxcx_t *mlxp = b->mlb_mlx;
2103 
2104 	if (b->mlb_state != MLXCX_BUFFER_ON_LOAN) {
2105 		b->mlb_mp = NULL;
2106 		return;
2107 	}
2108 	/*
2109 	 * The mblk which wrapped this buffer_t (in its mlb_mp field) has now
2110 	 * been freed, so clear the stale pointer before returning the buffer.
2111 	 */
2112 	b->mlb_mp = NULL;
2113 	mlxcx_buf_return(mlxp, b);
2114 }
2115 
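/*
 * Allocate a buffer backed by DMA memory which the driver owns (sized to
 * the port MTU), and pre-allocate the mblk wrapping it via desballoc().
 */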
2116 boolean_t
2117 mlxcx_buf_create(mlxcx_t *mlxp, mlxcx_buf_shard_t *shard, mlxcx_buffer_t **bp)
2118 {
2119 	mlxcx_buffer_t *b;
2120 	ddi_device_acc_attr_t acc;
2121 	ddi_dma_attr_t attr;
2122 	boolean_t ret;
2123 
2124 	b = kmem_cache_alloc(mlxp->mlx_bufs_cache, KM_SLEEP);
2125 	b->mlb_shard = shard;
2126 	b->mlb_foreign = B_FALSE;
2127 
2128 	mlxcx_dma_acc_attr(mlxp, &acc);
2129 	mlxcx_dma_buf_attr(mlxp, &attr);
2130 
2131 	ret = mlxcx_dma_alloc_offset(mlxp, &b->mlb_dma, &attr, &acc,
2132 	    B_FALSE, mlxp->mlx_ports[0].mlp_mtu, 2, B_TRUE);
2133 	if (!ret) {
2134 		kmem_cache_free(mlxp->mlx_bufs_cache, b);
2135 		return (B_FALSE);
2136 	}
2137 
2138 	b->mlb_frtn.free_func = mlxcx_buf_mp_return;
2139 	b->mlb_frtn.free_arg = (caddr_t)b;
2140 	b->mlb_mp = desballoc((unsigned char *)b->mlb_dma.mxdb_va,
2141 	    b->mlb_dma.mxdb_len, 0, &b->mlb_frtn);
2142 
2143 	*bp = b;
2144 
2145 	return (B_TRUE);
2146 }
2147 
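/*
 * Allocate a "foreign" buffer: one with no DMA memory of its own, used to
 * bind mblks owned by the rest of the networking stack at transmit time.
 */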
2148 boolean_t
2149 mlxcx_buf_create_foreign(mlxcx_t *mlxp, mlxcx_buf_shard_t *shard,
2150     mlxcx_buffer_t **bp)
2151 {
2152 	mlxcx_buffer_t *b;
2153 	ddi_dma_attr_t attr;
2154 	boolean_t ret;
2155 
2156 	b = kmem_cache_alloc(mlxp->mlx_bufs_cache, KM_SLEEP);
2157 	b->mlb_shard = shard;
2158 	b->mlb_foreign = B_TRUE;
2159 
2160 	mlxcx_dma_buf_attr(mlxp, &attr);
2161 
2162 	ret = mlxcx_dma_init(mlxp, &b->mlb_dma, &attr, B_TRUE);
2163 	if (!ret) {
2164 		kmem_cache_free(mlxp->mlx_bufs_cache, b);
2165 		return (B_FALSE);
2166 	}
2167 
2168 	*bp = b;
2169 
2170 	return (B_TRUE);
2171 }
2172 
2173 static mlxcx_buffer_t *
2174 mlxcx_buf_take_foreign(mlxcx_t *mlxp, mlxcx_work_queue_t *wq)
2175 {
2176 	mlxcx_buffer_t *b;
2177 	mlxcx_buf_shard_t *s = wq->mlwq_foreign_bufs;
2178 
2179 	mutex_enter(&s->mlbs_mtx);
2180 	if ((b = list_remove_head(&s->mlbs_free)) != NULL) {
2181 		ASSERT3U(b->mlb_state, ==, MLXCX_BUFFER_FREE);
2182 		ASSERT(b->mlb_foreign);
2183 		b->mlb_state = MLXCX_BUFFER_ON_WQ;
2184 		list_insert_tail(&s->mlbs_busy, b);
2185 	}
2186 	mutex_exit(&s->mlbs_mtx);
2187 
2188 	return (b);
2189 }
2190 
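/*
 * Copy packet data into a driver-owned buffer for transmit. If the DMA sync
 * reports an FM error, the buffer is returned to its shard and we retry
 * with a fresh one, up to MLXCX_BUF_BIND_MAX_ATTEMTPS times.
 */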
2191 static mlxcx_buffer_t *
2192 mlxcx_copy_data(mlxcx_t *mlxp, mlxcx_work_queue_t *wq, uint8_t *rptr, size_t sz)
2193 {
2194 	ddi_fm_error_t err;
2195 	mlxcx_buffer_t *b;
2196 	uint_t attempts = 0;
2197 
2198 copyb:
2199 	if ((b = mlxcx_buf_take(mlxp, wq)) == NULL)
2200 		return (NULL);
2201 
2202 	ASSERT3U(b->mlb_dma.mxdb_len, >=, sz);
2203 	bcopy(rptr, b->mlb_dma.mxdb_va, sz);
2204 
2205 	MLXCX_DMA_SYNC(b->mlb_dma, DDI_DMA_SYNC_FORDEV);
2206 
2207 	ddi_fm_dma_err_get(b->mlb_dma.mxdb_dma_handle, &err,
2208 	    DDI_FME_VERSION);
2209 	if (err.fme_status != DDI_FM_OK) {
2210 		ddi_fm_dma_err_clear(b->mlb_dma.mxdb_dma_handle,
2211 		    DDI_FME_VERSION);
2212 		mlxcx_buf_return(mlxp, b);
2213 		if (++attempts > MLXCX_BUF_BIND_MAX_ATTEMTPS) {
2214 			return (NULL);
2215 		}
2216 		goto copyb;
2217 	}
2218 
2219 	return (b);
2220 }
2221 
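/*
 * Prepare a single mblk for transmit: fragments smaller than the
 * mldp_tx_bind_threshold tunable are copied into a driver-owned buffer,
 * while larger ones are DMA-bound in place using a foreign buffer. If
 * binding fails, we fall back to copying.
 */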
2222 static mlxcx_buffer_t *
2223 mlxcx_bind_or_copy_mblk(mlxcx_t *mlxp, mlxcx_work_queue_t *wq,
2224     mblk_t *mp, size_t off)
2225 {
2226 	mlxcx_buffer_t *b;
2227 	uint8_t *rptr;
2228 	size_t sz;
2229 	boolean_t ret;
2230 
2231 	rptr = mp->b_rptr;
2232 	sz = MBLKL(mp);
2233 
2234 #ifdef DEBUG
2235 	if (off > 0) {
2236 		ASSERT3U(off, <, sz);
2237 	}
2238 #endif
2239 
2240 	rptr += off;
2241 	sz -= off;
2242 
2243 	if (sz < mlxp->mlx_props.mldp_tx_bind_threshold) {
2244 		b = mlxcx_copy_data(mlxp, wq, rptr, sz);
2245 	} else {
2246 		b = mlxcx_buf_take_foreign(mlxp, wq);
2247 		if (b == NULL)
2248 			return (NULL);
2249 
2250 		ret = mlxcx_dma_bind_mblk(mlxp, &b->mlb_dma, mp, off,
2251 		    B_FALSE);
2252 
2253 		if (!ret) {
2254 			mlxcx_buf_return(mlxp, b);
2255 
2256 			b = mlxcx_copy_data(mlxp, wq, rptr, sz);
2257 		}
2258 	}
2259 
2260 	return (b);
2261 }
2262 
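/*
 * Walk an mblk chain and build the chain of buffers needed to send it,
 * binding or copying each fragment as appropriate. If the result would need
 * more scatter pointers than a single send WQE allows, pull the message up
 * into one mblk and start over. Returns the number of buffers in the chain
 * (0 on failure), with the head buffer handed back via *bp.
 */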
2263 uint_t
2264 mlxcx_buf_bind_or_copy(mlxcx_t *mlxp, mlxcx_work_queue_t *wq,
2265     mblk_t *mpb, size_t off, mlxcx_buffer_t **bp)
2266 {
2267 	mlxcx_buffer_t *b, *b0 = NULL;
2268 	boolean_t first = B_TRUE;
2269 	mblk_t *mp;
2270 	size_t offset = off;
2271 	size_t ncookies = 0;
2272 	uint_t count = 0;
2273 
2274 	for (mp = mpb; mp != NULL && ncookies <= MLXCX_SQE_MAX_PTRS;
2275 	    mp = mp->b_cont) {
2276 		b = mlxcx_bind_or_copy_mblk(mlxp, wq, mp, offset);
2277 		if (b == NULL)
2278 			goto failed;
2279 
2280 		ncookies += b->mlb_dma.mxdb_ncookies;
2281 
2282 		if (first)
2283 			b0 = b;
2284 
2285 		if (!first)
2286 			b->mlb_state = MLXCX_BUFFER_ON_CHAIN;
2287 
2288 		b->mlb_tx_mp = mp;
2289 		b->mlb_tx_head = b0;
2290 		b->mlb_used = MBLKL(mp) - offset;
2291 
2292 		if (!first)
2293 			list_insert_tail(&b0->mlb_tx_chain, b);
2294 		first = B_FALSE;
2295 		offset = 0;
2296 
2297 		count++;
2298 	}
2299 
2300 	/*
2301 	 * The chain of mblks has resulted in too many cookies for
2302 	 * a single message. This is unusual, so take the hit to tidy
2303 	 * up: pull the message up into a single mblk and allocate the
2304 	 * requisite buffer for it.
2305 	 */
2306 	if (ncookies > MLXCX_SQE_MAX_PTRS) {
2307 		DTRACE_PROBE4(pullup, mlxcx_t *, mlxp, mlxcx_work_queue_t *, wq,
2308 		    mblk_t *, mpb, size_t, ncookies);
2309 
2310 		if (b0 != NULL)
2311 			mlxcx_buf_return_chain(mlxp, b0, B_TRUE);
2312 
2313 		if ((mp = msgpullup(mpb, -1)) == NULL)
2314 			return (0);
2315 
2316 		b0 = mlxcx_bind_or_copy_mblk(mlxp, wq, mp, off);
2317 		if (b0 == NULL) {
2318 			freemsg(mp);
2319 			return (0);
2320 		}
2321 		freemsg(mpb);
2322 
2323 		b0->mlb_tx_mp = mp;
2324 		b0->mlb_tx_head = b0;
2325 		b0->mlb_used = MBLKL(mp) - off;
2326 
2327 		count = 1;
2328 	}
2329 
2330 	*bp = b0;
2331 
2332 	return (count);
2333 
2334 failed:
2335 	if (b0 != NULL)
2336 		mlxcx_buf_return_chain(mlxp, b0, B_TRUE);
2337 
2338 	return (0);
2339 }
2340 
2341 mlxcx_buffer_t *
2342 mlxcx_buf_take(mlxcx_t *mlxp, mlxcx_work_queue_t *wq)
2343 {
2344 	mlxcx_buffer_t *b;
2345 	mlxcx_buf_shard_t *s = wq->mlwq_bufs;
2346 
2347 	mutex_enter(&s->mlbs_mtx);
2348 	if ((b = list_remove_head(&s->mlbs_free)) != NULL) {
2349 		ASSERT3U(b->mlb_state, ==, MLXCX_BUFFER_FREE);
2350 		b->mlb_state = MLXCX_BUFFER_ON_WQ;
2351 		list_insert_tail(&s->mlbs_busy, b);
2352 	}
2353 	mutex_exit(&s->mlbs_mtx);
2354 
2355 	return (b);
2356 }
2357 
2358 size_t
2359 mlxcx_buf_take_n(mlxcx_t *mlxp, mlxcx_work_queue_t *wq,
2360     mlxcx_buffer_t **bp, size_t nbufs)
2361 {
2362 	mlxcx_buffer_t *b;
2363 	size_t done = 0;
2364 	mlxcx_buf_shard_t *s;
2365 
2366 	s = wq->mlwq_bufs;
2367 
2368 	mutex_enter(&s->mlbs_mtx);
2369 	while (done < nbufs && (b = list_remove_head(&s->mlbs_free)) != NULL) {
2370 		ASSERT3U(b->mlb_state, ==, MLXCX_BUFFER_FREE);
2371 		b->mlb_state = MLXCX_BUFFER_ON_WQ;
2372 		list_insert_tail(&s->mlbs_busy, b);
2373 		bp[done++] = b;
2374 	}
2375 	mutex_exit(&s->mlbs_mtx);
2376 	return (done);
2377 }
2378 
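/*
 * Hand a receive buffer over to MAC. If the buffer's pre-allocated mblk has
 * already been consumed, a replacement is allocated here first.
 */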
2379 boolean_t
2380 mlxcx_buf_loan(mlxcx_t *mlxp, mlxcx_buffer_t *b)
2381 {
2382 	VERIFY3U(b->mlb_state, ==, MLXCX_BUFFER_ON_WQ);
2383 	ASSERT3P(b->mlb_mlx, ==, mlxp);
2384 
2385 	if (b->mlb_mp == NULL) {
2386 		b->mlb_mp = desballoc((unsigned char *)b->mlb_dma.mxdb_va,
2387 		    b->mlb_dma.mxdb_len, 0, &b->mlb_frtn);
2388 		if (b->mlb_mp == NULL)
2389 			return (B_FALSE);
2390 	}
2391 
2392 	b->mlb_state = MLXCX_BUFFER_ON_LOAN;
2393 	b->mlb_wqe_index = 0;
2394 	return (B_TRUE);
2395 }
2396 
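/*
 * Return an entire TX buffer chain to its shard, head last. If keepmp is
 * set, the head's mblk is detached first so that the caller retains
 * ownership of the message.
 */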
2397 void
2398 mlxcx_buf_return_chain(mlxcx_t *mlxp, mlxcx_buffer_t *b0, boolean_t keepmp)
2399 {
2400 	mlxcx_buffer_t *b;
2401 
2402 	if (b0->mlb_tx_head != b0) {
2403 		mlxcx_buf_return(mlxp, b0);
2404 		return;
2405 	}
2406 
2407 	while ((b = list_head(&b0->mlb_tx_chain)) != NULL) {
2408 		mlxcx_buf_return(mlxp, b);
2409 	}
2410 	if (keepmp) {
2411 		b0->mlb_tx_mp = NULL;
2412 		b0->mlb_tx_head = NULL;
2413 	}
2414 	mlxcx_buf_return(mlxp, b0);
2415 }
2416 
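/*
 * Return a single buffer to its shard's free list, undoing whatever state
 * it was in (on a WQ, on loan to MAC, or part of a TX chain). Foreign
 * buffers are unbound from their borrowed memory first. For the head of a
 * TX chain, the original mblk is freed once the shard lock has been
 * dropped.
 */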
2417 void
2418 mlxcx_buf_return(mlxcx_t *mlxp, mlxcx_buffer_t *b)
2419 {
2420 	mlxcx_buffer_state_t oldstate = b->mlb_state;
2421 	mlxcx_buffer_t *txhead = b->mlb_tx_head;
2422 	mlxcx_buf_shard_t *s = b->mlb_shard;
2423 	mblk_t *mp = b->mlb_tx_mp;
2424 
2425 	VERIFY3U(oldstate, !=, MLXCX_BUFFER_FREE);
2426 	ASSERT3P(b->mlb_mlx, ==, mlxp);
2427 
2428 	/*
2429 	 * The mlbs_mtx taken below is a heavily contended lock, so it is
2430 	 * imperative that we do as much of the buffer cleanup as possible
2431 	 * before taking it.
2432 	 */
2433 	b->mlb_state = MLXCX_BUFFER_FREE;
2434 	b->mlb_wqe_index = 0;
2435 	b->mlb_tx_head = NULL;
2436 	b->mlb_tx_mp = NULL;
2437 	b->mlb_used = 0;
2438 	b->mlb_wqebbs = 0;
2439 	ASSERT(list_is_empty(&b->mlb_tx_chain));
2440 
2441 	if (b->mlb_foreign) {
2442 		if (b->mlb_dma.mxdb_flags & MLXCX_DMABUF_BOUND) {
2443 			mlxcx_dma_unbind(mlxp, &b->mlb_dma);
2444 		}
2445 	}
2446 
2447 	mutex_enter(&s->mlbs_mtx);
2448 	switch (oldstate) {
2449 	case MLXCX_BUFFER_INIT:
2450 		break;
2451 	case MLXCX_BUFFER_ON_WQ:
2452 		list_remove(&s->mlbs_busy, b);
2453 		break;
2454 	case MLXCX_BUFFER_ON_LOAN:
2455 		ASSERT(!b->mlb_foreign);
2456 		list_remove(&s->mlbs_busy, b);
2457 		break;
2458 	case MLXCX_BUFFER_FREE:
2459 		VERIFY(0);
2460 		break;
2461 	case MLXCX_BUFFER_ON_CHAIN:
2462 		ASSERT(txhead != NULL);
2463 		list_remove(&txhead->mlb_tx_chain, b);
2464 		list_remove(&s->mlbs_busy, b);
2465 		break;
2466 	}
2467 
2468 	list_insert_tail(&s->mlbs_free, b);
2469 	cv_signal(&s->mlbs_free_nonempty);
2470 
2471 	mutex_exit(&s->mlbs_mtx);
2472 
2473 	/*
2474 	 * For TX chain heads, free the mblk_t after we let go of the lock.
2475 	 * This might be a borrowed buf that we in turn loaned to MAC, in which
2476 	 * case calling freemsg() on it will re-enter this very function -- so
2477 	 * we better not be holding the lock!
2478 	 */
2479 	if (txhead == b)
2480 		freemsg(mp);
2481 }
2482 
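/*
 * Tear a buffer down completely and return it to the kmem cache. The
 * caller must hold the shard's lock.
 */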
2483 void
2484 mlxcx_buf_destroy(mlxcx_t *mlxp, mlxcx_buffer_t *b)
2485 {
2486 	mlxcx_buf_shard_t *s = b->mlb_shard;
2487 	VERIFY(b->mlb_state == MLXCX_BUFFER_FREE ||
2488 	    b->mlb_state == MLXCX_BUFFER_INIT);
2489 	ASSERT(mutex_owned(&s->mlbs_mtx));
2490 	if (b->mlb_state == MLXCX_BUFFER_FREE)
2491 		list_remove(&s->mlbs_free, b);
2492 
2493 	/*
2494 	 * This is going back to the kmem cache, so it needs to be set up in
2495 	 * the same way we expect a new buffer to come out (state INIT, other
2496 	 * fields NULL'd).
2497 	 */
2498 	b->mlb_state = MLXCX_BUFFER_INIT;
2499 	b->mlb_shard = NULL;
2500 	if (b->mlb_mp != NULL) {
2501 		freeb(b->mlb_mp);
2502 		ASSERT(b->mlb_mp == NULL);
2503 	}
2504 	mlxcx_dma_free(&b->mlb_dma);
2505 	ASSERT(list_is_empty(&b->mlb_tx_chain));
2506 
2507 	kmem_cache_free(mlxp->mlx_bufs_cache, b);
2508 }
2509