/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Emulex.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Source file containing Queue handling functions
 */

#include <oce_impl.h>

int oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx, size_t req_size,
    enum qtype qtype);
/* Mailbox Queue functions */
struct oce_mq *
oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);

/* event queue handling */
struct oce_eq *
oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
    uint32_t eq_delay);

/* completion queue handling */
struct oce_cq *
oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
    uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
    boolean_t nodelay, uint32_t ncoalesce);

/* Tx WQ functions */
static struct oce_wq *oce_wq_init(struct oce_dev *dev, uint32_t q_len,
    int wq_type);
static void oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq);
/* Rx Queue functions */
static struct oce_rq *oce_rq_init(struct oce_dev *dev, uint32_t q_len,
    uint32_t frag_size, uint32_t mtu,
    boolean_t rss);
static void oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq);
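
/*
 * Queue layering note: each EQ fans in completions from one or more
 * CQs, and every WQ, RQ and MQ owns a private CQ created when the
 * queue itself is created (see oce_wq_create(), oce_rq_create() and
 * oce_mq_create() below). For WQs and RQs, the *_init()/*_fini()
 * routines manage only host-side resources, while the
 * *_create()/*_del() routines issue the mailbox commands that create
 * and destroy the corresponding hardware queues.
 */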

/*
 * function to create an event queue
 *
 * dev - software handle to the device
 * q_len - number of entries in the EQ
 * item_size - size of an EQ entry in bytes
 * eq_delay - interrupt coalescing delay multiplier
 *
 * return pointer to EQ; NULL on failure
 */
struct oce_eq *
oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
    uint32_t eq_delay)
{
	struct oce_eq *eq;
	struct oce_mbx mbx;
	struct mbx_create_common_eq *fwcmd;
	int ret = 0;

	/* allocate an eq */
	eq = kmem_zalloc(sizeof (struct oce_eq), KM_NOSLEEP);

	if (eq == NULL) {
		return (NULL);
	}

	bzero(&mbx, sizeof (struct oce_mbx));
	/* build the command in the mailbox payload */
	fwcmd = (struct mbx_create_common_eq *)&mbx.payload;

	eq->ring = create_ring_buffer(dev, q_len,
	    item_size, DDI_DMA_CONSISTENT);

	if (eq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "EQ ring alloc failed");
		kmem_free(eq, sizeof (struct oce_eq));
		return (NULL);
	}

	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_eq));

	fwcmd->params.req.num_pages = eq->ring->dbuf->num_pages;
	oce_page_list(eq->ring->dbuf, &fwcmd->params.req.pages[0],
	    eq->ring->dbuf->num_pages);

	/* dw 0 */
	fwcmd->params.req.eq_ctx.size = (item_size == 4) ? 0 : 1;
	fwcmd->params.req.eq_ctx.valid = 1;
	/* dw 1 */
	fwcmd->params.req.eq_ctx.armed = 0;
	fwcmd->params.req.eq_ctx.pd = 0;
	fwcmd->params.req.eq_ctx.count = OCE_LOG2(q_len/256);
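	/*
	 * The count field above encodes the ring size as
	 * log2(q_len / 256); e.g. a 1024-entry EQ encodes as
	 * OCE_LOG2(1024/256) = 2, assuming OCE_LOG2() is the integer
	 * log2 helper, so q_len should be a power-of-two multiple
	 * of 256.
	 */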

	/* dw 2 */
	fwcmd->params.req.eq_ctx.function = dev->fn;
	fwcmd->params.req.eq_ctx.nodelay = 0;
	fwcmd->params.req.eq_ctx.phase = 0;
	/* todo: calculate multiplier from max min and cur */
	fwcmd->params.req.eq_ctx.delay_mult = eq_delay;

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_eq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
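	/*
	 * The DW_SWAP above presumably converts the command dwords to
	 * the byte order the firmware expects (a no-op on a
	 * little-endian host), covering the payload plus the bootstrap
	 * mailbox request header (OCE_BMBX_RHDR_SZ).
	 */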

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "EQ create failed: %d", ret);
		destroy_ring_buffer(dev, eq->ring);
		kmem_free(eq, sizeof (struct oce_eq));
		return (NULL);
	}

	/* interpret the response */
	eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t)eq_delay;
	eq->parent = (void *)dev;
	atomic_inc_32(&dev->neqs);
	oce_log(dev, CE_NOTE, MOD_CONFIG,
	    "EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
	return (eq);
} /* oce_eq_create */
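
/*
 * Typical usage, as in oce_create_queues() below: one EQ per interrupt
 * vector, e.g. eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
 */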

/*
 * function to delete an event queue
 *
 * dev - software handle to the device
 * eq - handle to the eq to be deleted
 *
 * return none
 */
void
oce_eq_del(struct oce_dev *dev, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;

	/* drain the residual events */
	oce_drain_eq(eq);

	/* destroy the ring */
	destroy_ring_buffer(dev, eq->ring);
	eq->ring = NULL;

	/* send a command to delete the EQ; zero the mailbox first, */
	/* as the other queue-delete paths do */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
	fwcmd->params.req.id = eq->eq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_eq),
	    QTYPE_EQ);
	kmem_free(eq, sizeof (struct oce_eq));
	atomic_dec_32(&dev->neqs);
}

/*
 * function to create a completion queue
 *
 * dev - software handle to the device
 * eq - eq to associate the cq with
 * q_len - number of entries in the CQ
 * item_size - size of a CQ entry in bytes
 * sol_event, is_eventable, nodelay, ncoalesce - CQ context settings
 *	passed through to the firmware
 *
 * return pointer to the cq created. NULL on failure
 */
struct oce_cq *
oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
    uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
    boolean_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	struct oce_mbx mbx;
	struct mbx_create_common_cq *fwcmd;
	int ret = 0;

	/* create cq */
	cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
	if (cq == NULL) {
		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
		    "CQ allocation failed");
		return (NULL);
	}

	/* create the ring buffer for this queue */
	cq->ring = create_ring_buffer(dev, q_len,
	    item_size, DDI_DMA_CONSISTENT);
	if (cq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "CQ ring alloc failed");
		kmem_free(cq, sizeof (struct oce_cq));
		return (NULL);
	}
	/* initialize mailbox */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_common_cq *)&mbx.payload;

	/* fill the command header */
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_cq));

	/* fill command context */
	/* dw0 */
	fwcmd->params.req.cq_ctx.eventable = is_eventable;
	fwcmd->params.req.cq_ctx.sol_event = sol_event;
	fwcmd->params.req.cq_ctx.valid = 1;
	fwcmd->params.req.cq_ctx.count = OCE_LOG2(q_len/256);
	fwcmd->params.req.cq_ctx.nodelay = nodelay;
	fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;

	/* dw1 */
	fwcmd->params.req.cq_ctx.armed = B_FALSE;
	fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
	fwcmd->params.req.cq_ctx.pd = 0;
	/* dw2 */
	fwcmd->params.req.cq_ctx.function = dev->fn;

	/* fill the rest of the command */
	fwcmd->params.req.num_pages = cq->ring->dbuf->num_pages;
	oce_page_list(cq->ring->dbuf, &fwcmd->params.req.pages[0],
	    cq->ring->dbuf->num_pages);

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_cq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now send the mailbox */
	ret = oce_mbox_post(dev, &mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "CQ create failed: 0x%x", ret);
		destroy_ring_buffer(dev, cq->ring);
		kmem_free(cq, sizeof (struct oce_cq));
		return (NULL);
	}

	cq->parent = dev;
	cq->eq = eq; /* associated event queue */
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.sol_eventable = (uint8_t)sol_event;
	cq->cq_cfg.nodelay = (uint8_t)nodelay;
	/* interpret the response */
	cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
	dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
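	/*
	 * The dev->cq[] slot set above lets the interrupt path map a
	 * hardware cq_id back to its driver CQ; this presumably relies
	 * on cq_id % OCE_MAX_CQ being collision-free for the CQs in
	 * use.
	 */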
	atomic_inc_32(&eq->ref_count);
	return (cq);
} /* oce_cq_create */

/*
 * function to delete a completion queue
 *
 * dev - software handle to the device
 * cq - handle to the CQ to delete
 *
 * return none
 */
static void
oce_cq_del(struct oce_dev *dev, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	/* destroy the ring */
	destroy_ring_buffer(dev, cq->ring);
	cq->ring = NULL;

	bzero(&mbx, sizeof (struct oce_mbx));
	/* send a command to delete the CQ */
	fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
	fwcmd->params.req.id = cq->cq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_cq),
	    QTYPE_CQ);

	/* Reset the handler */
	cq->cq_handler = NULL;
	dev->cq[cq->cq_id % OCE_MAX_CQ] = NULL;
	atomic_dec_32(&cq->eq->ref_count);
	mutex_destroy(&cq->lock);

	/* free the cq */
	kmem_free(cq, sizeof (struct oce_cq));
} /* oce_cq_del */

/*
 * function to create an MQ
 *
 * dev - software handle to the device
 * eq - the EQ to associate with the MQ for event notification
 * q_len - the number of entries to create in the MQ
 *
 * return pointer to the created MQ; NULL on failure
 */
struct oce_mq *
oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
{
	struct oce_mbx mbx;
	struct mbx_create_common_mq *fwcmd;
	struct oce_mq *mq = NULL;
	int ret = 0;
	struct oce_cq *cq;

	/* Create the Completion Q */
	cq = oce_cq_create(dev, eq, CQ_LEN_256,
	    sizeof (struct oce_mq_cqe),
	    B_FALSE, B_TRUE, B_TRUE, 0);
	if (cq == NULL) {
		return (NULL);
	}

	/* allocate the mq */
	mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);

	if (mq == NULL) {
		goto mq_alloc_fail;
	}

	bzero(&mbx, sizeof (struct oce_mbx));
	/* build the command in the mailbox payload */
	fwcmd = (struct mbx_create_common_mq *)&mbx.payload;

	/* create the ring buffer for this queue */
	mq->ring = create_ring_buffer(dev, q_len,
	    sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (mq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "MQ ring alloc failed");
		goto mq_ring_alloc;
	}

	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_MQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_mq));

	fwcmd->params.req.num_pages = mq->ring->dbuf->num_pages;
	oce_page_list(mq->ring->dbuf, fwcmd->params.req.pages,
	    mq->ring->dbuf->num_pages);
	fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
	fwcmd->params.req.context.u0.s.ring_size =
	    OCE_LOG2(q_len) + 1;
	fwcmd->params.req.context.u0.s.valid = 1;
	fwcmd->params.req.context.u0.s.fid = dev->fn;
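	/*
	 * ring_size above encodes the MQ depth as OCE_LOG2(q_len) + 1;
	 * e.g. the 64-entry MQ created from oce_create_queues() encodes
	 * as 7, assuming OCE_LOG2() is the integer log2 helper.
	 */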

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_mq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now send the mailbox */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "MQ create failed: 0x%x", ret);
		goto mq_fail;
	}

	/* interpret the response */
	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
	mq->cq = cq;
	mq->cfg.q_len = (uint8_t)q_len;
	mq->cfg.eqd = 0;

	/* fill rest of the mq */
	mq->parent = dev;

	/* set the MQCQ handlers */
	cq->cq_handler = oce_drain_mq_cq;
	cq->cb_arg = (void *)mq;
	mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	return (mq);

mq_fail:
	destroy_ring_buffer(dev, mq->ring);
mq_ring_alloc:
	kmem_free(mq, sizeof (struct oce_mq));
mq_alloc_fail:
	oce_cq_del(dev, cq);
	return (NULL);
} /* oce_mq_create */

/*
 * function to delete an MQ
 *
 * dev - software handle to the device
 * mq - pointer to the MQ to delete
 *
 * return none
 */
static void
oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_mq *fwcmd;

	/* destroy the ring */
	destroy_ring_buffer(dev, mq->ring);
	mq->ring = NULL;
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
	fwcmd->params.req.id = mq->mq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_mq),
	    QTYPE_MQ);
	oce_cq_del(dev, mq->cq);
	mq->cq = NULL;
	mutex_destroy(&mq->lock);
	kmem_free(mq, sizeof (struct oce_mq));
} /* oce_mq_del */

/*
 * function to allocate WQ resources for NIC Tx
 *
 * dev - software handle to the device
 * q_len - number of entries in the WQ; must be in [256, 2048]
 * wq_type - type of the WQ
 *
 * return pointer to the WQ created. NULL on failure
 */
static struct oce_wq *
oce_wq_init(struct oce_dev *dev, uint32_t q_len, int wq_type)
{
	struct oce_wq *wq;
	char str[MAX_POOL_NAME];
	int ret;
	static int wq_id = 0;

	ASSERT(dev != NULL);
	/* q_len must be min 256 and max 2048 */
	if (q_len < 256 || q_len > 2048) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Invalid q length. Must be "
		    "[256, 2048]: 0x%x", q_len);
		return (NULL);
	}

	/* allocate wq */
	wq = kmem_zalloc(sizeof (struct oce_wq), KM_NOSLEEP);
	if (wq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ allocation failed");
		return (NULL);
	}

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t)wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;
	wq->cfg.buf_size = dev->tx_bcopy_limit;
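
	/*
	 * nbufs and nhdl are sized at twice the ring depth, and
	 * buf_size tracks tx_bcopy_limit, which suggests frames up to
	 * that size are copied into preallocated buffers while larger
	 * ones are DMA-bound through the map handles. This is inferred
	 * from the names; the Tx path itself lives elsewhere in the
	 * driver.
	 */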

	/* assign parent */
	wq->parent = (void *)dev;

	/* Create the WQ Buffer pool */
	ret = oce_wqb_cache_create(wq, wq->cfg.buf_size);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ Buffer Pool create failed");
		goto wqb_fail;
	}

	/* Create a pool of memory handles */
	ret = oce_wqm_cache_create(wq);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ MAP Handles Pool create failed");
		goto wqm_fail;
	}

	(void) snprintf(str, MAX_POOL_NAME, "%s%d%s%d", "oce_wqed_",
	    dev->dev_id, "_", wq_id++);
	wq->wqed_cache = kmem_cache_create(str, sizeof (oce_wqe_desc_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	if (wq->wqed_cache == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ Packet Desc Pool create failed");
		goto wqed_fail;
	}

	/* create the ring buffer */
	wq->ring = create_ring_buffer(dev, q_len,
	    NIC_WQE_SIZE, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (wq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to create WQ ring");
		goto wq_ringfail;
	}

	/* Initialize the WQ Tx lock */
	mutex_init(&wq->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	/* Initialize the WQ Tx completion lock */
	mutex_init(&wq->txc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
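	/*
	 * Inferred from the names: tx_lock serializes WQE posting while
	 * txc_lock covers the Tx completion path; the actual lock usage
	 * is in the transmit code elsewhere in the driver.
	 */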
	atomic_inc_32(&dev->nwqs);

	OCE_LIST_CREATE(&wq->wqe_desc_list, DDI_INTR_PRI(dev->intr_pri));
	return (wq);

wq_ringfail:
	kmem_cache_destroy(wq->wqed_cache);
wqed_fail:
	oce_wqm_cache_destroy(wq);
wqm_fail:
	oce_wqb_cache_destroy(wq);
wqb_fail:
	kmem_free(wq, sizeof (struct oce_wq));
	return (NULL);
} /* oce_wq_init */

/*
 * function to release WQ resources
 *
 * dev - software handle to the device
 * wq - WQ to free
 *
 * return none
 */
static void
oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq)
{
	/* destroy the buffer and handle caches */
	oce_wqb_cache_destroy(wq);
	oce_wqm_cache_destroy(wq);
	kmem_cache_destroy(wq->wqed_cache);

	/* Free the packet descriptor list */
	OCE_LIST_DESTROY(&wq->wqe_desc_list);
	destroy_ring_buffer(dev, wq->ring);
	wq->ring = NULL;
	/* Destroy the Mutexes */
	mutex_destroy(&wq->tx_lock);
	mutex_destroy(&wq->txc_lock);
	kmem_free(wq, sizeof (struct oce_wq));
	atomic_dec_32(&dev->nwqs);
} /* oce_wq_fini */
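/*
 * function to create the hardware WQ and its completion queue
 *
 * wq - software WQ previously set up by oce_wq_init()
 * eq - EQ to which the WQ completion queue binds
 *
 * return 0 => success, failure otherwise
 */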
static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_create_nic_wq *fwcmd;
	struct oce_dev *dev = wq->parent;
	struct oce_cq *cq;
	int ret;

	/* create the CQ */
	cq = oce_cq_create(dev, eq, CQ_LEN_1024,
	    sizeof (struct oce_nic_tx_cqe),
	    B_FALSE, B_TRUE, B_FALSE, 3);
	if (cq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ CQ create failed");
		return (DDI_FAILURE);
	}
	/* now fill the command */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_NIC,
	    OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_nic_wq));

	fwcmd->params.req.nic_wq_type = (uint8_t)wq->cfg.wq_type;
	fwcmd->params.req.num_pages = wq->ring->dbuf->num_pages;
	oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = %u size = %lu",
	    (uint32_t)wq->ring->dbuf->num_pages,
	    wq->ring->dbuf->size);

	/* workaround: fill 0x01 for ulp_mask in rsvd0 */
	fwcmd->params.req.rsvd0 = 0x01;
	fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
	fwcmd->params.req.valid = 1;
	fwcmd->params.req.pd_id = 0;
	fwcmd->params.req.pci_function_id = dev->fn;
	fwcmd->params.req.cq_id = cq->cq_id;

	oce_page_list(wq->ring->dbuf, fwcmd->params.req.pages,
	    wq->ring->dbuf->num_pages);

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_nic_wq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "WQ create failed: %d", ret);
		oce_cq_del(dev, cq);
		return (ret);
	}

	/* interpret the response */
	wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
	wq->qstate = QCREATED;
	wq->cq = cq;
	/* set the WQCQ handlers */
	wq->cq->cq_handler = oce_drain_wq_cq;
	wq->cq->cb_arg = (void *)wq;
	/* All are free to start with */
	wq->wq_free = wq->cfg.q_len;
	/* reset indices */
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;
	return (0);
}

/*
 * function to delete a WQ
 *
 * dev - software handle to the device
 * wq - WQ to delete
 *
 * return none
 */
static void
oce_wq_del(struct oce_dev *dev, struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;

	ASSERT(dev != NULL);
	ASSERT(wq != NULL);
	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof (struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void) oce_destroy_q(dev, &mbx,
		    sizeof (struct mbx_delete_nic_wq),
		    QTYPE_WQ);
		wq->qstate = QDELETED;
		oce_cq_del(dev, wq->cq);
		wq->cq = NULL;
	}
} /* oce_wq_del */

/*
 * function to allocate RQ resources
 *
 * dev - software handle to the device
 * q_len - number of entries in the RQ; must be in (0, 1024]
 * frag_size - size of an Rx fragment buffer
 * mtu - max frame size for this RQ
 * rss - whether this RQ is an RSS queue
 *
 * return pointer to the RQ created. NULL on failure
 */
static struct oce_rq *
oce_rq_init(struct oce_dev *dev, uint32_t q_len,
    uint32_t frag_size, uint32_t mtu,
    boolean_t rss)
{
	struct oce_rq *rq;
	int ret;

	/* validate q creation parameters */
	if (!OCE_LOG2(frag_size))
		return (NULL);
	if ((q_len == 0) || (q_len > 1024))
		return (NULL);

	/* allocate the rq */
	rq = kmem_zalloc(sizeof (struct oce_rq), KM_NOSLEEP);
	if (rq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ allocation failed");
		return (NULL);
	}

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->cfg.nbufs = dev->rq_max_bufs;
	rq->cfg.is_rss_queue = rss;

	/* assign parent */
	rq->parent = (void *)dev;

	rq->rq_bdesc_array =
	    kmem_zalloc((sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs), KM_NOSLEEP);
	if (rq->rq_bdesc_array == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ bdesc alloc failed");
		goto rqbd_alloc_fail;
	}
	/* create the rq buffer descriptor ring */
	rq->shadow_ring =
	    kmem_zalloc((rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)),
	    KM_NOSLEEP);
	if (rq->shadow_ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ shadow ring alloc failed");
		goto rq_shdw_fail;
	}

	/* allocate the free list array */
	rq->rqb_freelist =
	    kmem_zalloc(rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *), KM_NOSLEEP);
	if (rq->rqb_freelist == NULL) {
		goto rqb_free_list_fail;
	}
	/* create the buffer pool */
	ret = oce_rqb_cache_create(rq, dev->rq_frag_size +
	    OCE_RQE_BUF_HEADROOM);
	if (ret != DDI_SUCCESS) {
		goto rqb_fail;
	}

	/* create the ring buffer */
	rq->ring = create_ring_buffer(dev, q_len,
	    sizeof (struct oce_nic_rqe), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (rq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ ring create failed");
		goto rq_ringfail;
	}

	/* Initialize the RQ lock */
	mutex_init(&rq->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	/* Initialize the recharge lock */
	mutex_init(&rq->rc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	atomic_inc_32(&dev->nrqs);
	return (rq);

	/* unwind in the reverse order of allocation */
rq_ringfail:
	oce_rqb_cache_destroy(rq);
rqb_fail:
	kmem_free(rq->rqb_freelist,
	    (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
rqb_free_list_fail:
	kmem_free(rq->shadow_ring,
	    (rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)));
rq_shdw_fail:
	kmem_free(rq->rq_bdesc_array,
	    (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
rqbd_alloc_fail:
	kmem_free(rq, sizeof (struct oce_rq));
	return (NULL);
} /* oce_rq_init */

/*
 * function to release RQ resources
 *
 * dev - software handle to the device
 * rq - RQ to free
 *
 * return none
 */
static void
oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq)
{
	/* Destroy buffer cache */
	oce_rqb_cache_destroy(rq);
	destroy_ring_buffer(dev, rq->ring);
	rq->ring = NULL;
	kmem_free(rq->shadow_ring,
	    sizeof (oce_rq_bdesc_t *) * rq->cfg.q_len);
	rq->shadow_ring = NULL;
	kmem_free(rq->rq_bdesc_array,
	    (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
	rq->rq_bdesc_array = NULL;
	kmem_free(rq->rqb_freelist,
	    (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
	rq->rqb_freelist = NULL;
	mutex_destroy(&rq->rx_lock);
	mutex_destroy(&rq->rc_lock);
	kmem_free(rq, sizeof (struct oce_rq));
	atomic_dec_32(&dev->nrqs);
} /* oce_rq_fini */
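/*
 * function to create the hardware RQ and its completion queue
 *
 * rq - software RQ previously set up by oce_rq_init()
 * if_id - interface id to attach the RQ to
 * eq - EQ to which the RQ completion queue binds
 *
 * return 0 => success, failure otherwise
 */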
static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_create_nic_rq *fwcmd;
	struct oce_dev *dev = rq->parent;
	struct oce_cq *cq;
	int ret;

	cq = oce_cq_create(dev, eq, CQ_LEN_1024, sizeof (struct oce_nic_rx_cqe),
	    B_FALSE, B_TRUE, B_FALSE, 3);

	if (cq == NULL) {
		return (DDI_FAILURE);
	}

	/* now fill the command */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_NIC,
	    OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_nic_rq));

	fwcmd->params.req.num_pages = rq->ring->dbuf->num_pages;
	fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
	fwcmd->params.req.cq_id = cq->cq_id;
	oce_page_list(rq->ring->dbuf, fwcmd->params.req.pages,
	    rq->ring->dbuf->num_pages);

	fwcmd->params.req.if_id = if_id;
	fwcmd->params.req.max_frame_size = (uint16_t)rq->cfg.mtu;
	fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_nic_rq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "RQ create failed: %d", ret);
		oce_cq_del(dev, cq);
		return (ret);
	}

	/* interpret the response */
	rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
	rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
	rq->cfg.if_id = if_id;
	rq->qstate = QCREATED;
	rq->cq = cq;

	/* set the Completion Handler */
	rq->cq->cq_handler = oce_drain_rq_cq;
	rq->cq->cb_arg = (void *)rq;
	/* reset the indices */
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
	rq->buf_avail = 0;
	return (0);
}

/*
 * function to delete an RQ
 *
 * dev - software handle to the device
 * rq - RQ to delete
 *
 * return none
 */
static void
oce_rq_del(struct oce_dev *dev, struct oce_rq *rq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;

	ASSERT(dev != NULL);
	ASSERT(rq != NULL);

	bzero(&mbx, sizeof (struct oce_mbx));

	/* delete the Queue */
	if (rq->qstate == QCREATED) {
		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
		fwcmd->params.req.rq_id = rq->rq_id;
		(void) oce_destroy_q(dev, &mbx,
		    sizeof (struct mbx_delete_nic_rq), QTYPE_RQ);
		rq->qstate = QDELETED;
		oce_clean_rq(rq);
		/* Delete the associated CQ */
		oce_cq_del(dev, rq->cq);
		rq->cq = NULL;
		/* free up the posted buffers */
		oce_rq_discharge(rq);
	}
} /* oce_rq_del */

/*
 * function to arm an EQ so that it can generate events
 *
 * dev - software handle to the device
 * qid - id of the EQ returned by the fw at the time of creation
 * npopped - number of EQEs popped (consumed) to acknowledge
 * rearm - rearm bit
 * clearint - bit to clear the interrupt condition because of which
 *	EQEs are generated
 *
 * return none
 */
void
oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
    boolean_t rearm, boolean_t clearint)
{
	eq_db_t eq_db = {0};

	eq_db.bits.rearm = rearm;
	eq_db.bits.event = B_TRUE;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
	OCE_DB_WRITE32(dev, PD_EQ_DB, eq_db.dw0);
}
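
/*
 * Example: oce_drain_eq() below acks consumed entries without
 * re-arming by calling oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE,
 * B_TRUE); an interrupt handler would normally pass rearm = B_TRUE to
 * keep events flowing.
 */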

/*
 * function to arm a CQ with CQEs
 *
 * dev - software handle to the device
 * qid - the id of the CQ returned by the fw at the time of creation
 * npopped - number of CQEs popped (consumed) to acknowledge
 * rearm - rearm bit enable/disable
 *
 * return none
 */
void
oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
    boolean_t rearm)
{
	cq_db_t cq_db = {0};

	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_DB_WRITE32(dev, PD_CQ_DB, cq_db.dw0);
}
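
/*
 * Note that oce_arm_eq() sets the event bit while oce_arm_cq() clears
 * it; on this family of hardware the EQ and CQ doorbells presumably
 * share a register layout, with the event bit selecting EQ semantics.
 */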

/*
 * function to delete an EQ, CQ, MQ, WQ or RQ
 *
 * dev - software handle to the device
 * mbx - mbox command to send to the fw to delete the queue
 *	(mbx contains the queue information to delete)
 * req_size - the size of the mbx payload dependent on the qtype
 * qtype - the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 *
 * return DDI_SUCCESS => success, failure otherwise
 */
int
oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx, size_t req_size,
    enum qtype qtype)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int ret;

	switch (qtype) {
	case QTYPE_EQ: {
		opcode = OPCODE_DESTROY_COMMON_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_CQ: {
		opcode = OPCODE_DESTROY_COMMON_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_MQ: {
		opcode = OPCODE_DESTROY_COMMON_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_WQ: {
		opcode = OPCODE_DELETE_NIC_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	}
	case QTYPE_RQ: {
		opcode = OPCODE_DELETE_NIC_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	}
	default: {
		/* fail rather than posting a command with */
		/* uninitialized opcode/subsys on non-DEBUG builds */
		ASSERT(0);
		return (DDI_FAILURE);
	}
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
	    opcode, MBX_TIMEOUT_SEC, req_size);

	/* fill rest of mbx */
	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t)req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	/* send command */
	ret = oce_mbox_post(dev, mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to delete queue");
	}
	return (ret);
}

/*
 * function to set the delay parameter in the EQ for interrupt coalescing
 *
 * dev - software handle to the device
 * eq_arr - array of EQ ids whose delay is to be modified
 * eq_cnt - number of elements in eq_arr
 * eq_delay - delay parameter
 *
 * return DDI_SUCCESS => success, failure otherwise
 */
int
oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
    uint32_t eq_cnt, uint32_t eq_delay)
{
	struct oce_mbx mbx;
	struct mbx_modify_common_eq_delay *fwcmd;
	int ret;
	int neq;

	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;

	/* fill the command */
	fwcmd->params.req.num_eq = eq_cnt;
	for (neq = 0; neq < eq_cnt; neq++) {
		fwcmd->params.req.delay[neq].eq_id = eq_arr[neq];
		fwcmd->params.req.delay[neq].phase = 0;
		fwcmd->params.req.delay[neq].dm = eq_delay;
	}

	/* initialize the mailbox request header */
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_MODIFY_COMMON_EQ_DELAY,
	    MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_modify_common_eq_delay));

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_modify_common_eq_delay);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Failed to set EQ delay %d", ret);
	}

	return (ret);
} /* oce_set_eq_delay */

/*
 * function to drain residual events from an EQ, used during stop
 *
 * eq - pointer to event queue structure
 *
 * return none
 */
void
oce_drain_eq(struct oce_eq *eq)
{
	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	struct oce_dev *dev;

	dev = eq->parent;
	/* get the first item in eq to process */
	eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);

	while (eqe->u0.dw0) {
		/* clear the valid bit so the entry reads as consumed */
		eqe->u0.dw0 = 0;

		/* process next eqe */
		RING_GET(eq->ring, 1);

		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		num_eqe++;
	} /* while pending EQEs */
	if (num_eqe) {
		oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
	}
} /* oce_drain_eq */
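
/*
 * oce_eq_del() drains residual events this way before destroying the
 * EQ ring; acking them with rearm disabled presumably keeps the
 * hardware's popped-entry accounting consistent without triggering
 * further interrupts.
 */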

int
oce_init_txrx(struct oce_dev *dev)
{
	int qid = 0;

	/* enable RSS if rx queues > 1 */
	dev->rss_enable = (dev->rx_rings > 1) ? B_TRUE : B_FALSE;

	for (qid = 0; qid < dev->tx_rings; qid++) {
		dev->wq[qid] = oce_wq_init(dev, dev->tx_ring_size,
		    NIC_WQ_TYPE_STANDARD);
		if (dev->wq[qid] == NULL) {
			goto queue_fail;
		}
	}

	/* Now create the Rx Queues */
	/* qid 0 is always the default, non-RSS queue */
	dev->rq[0] = oce_rq_init(dev, dev->rx_ring_size, dev->rq_frag_size,
	    OCE_MAX_JUMBO_FRAME_SIZE, B_FALSE);
	if (dev->rq[0] == NULL) {
		goto queue_fail;
	}

	for (qid = 1; qid < dev->rx_rings; qid++) {
		dev->rq[qid] = oce_rq_init(dev, dev->rx_ring_size,
		    dev->rq_frag_size, OCE_MAX_JUMBO_FRAME_SIZE,
		    dev->rss_enable);
		if (dev->rq[qid] == NULL) {
			goto queue_fail;
		}
	}

	return (DDI_SUCCESS);
queue_fail:
	oce_fini_txrx(dev);
	return (DDI_FAILURE);
}
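
/*
 * The queue_fail path above relies on oce_fini_txrx() tolerating a
 * partially built configuration: it walks the nwqs/nrqs counters and
 * frees only the non-NULL wq/rq slots, so queues that were never
 * initialized are simply skipped.
 */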
void
oce_fini_txrx(struct oce_dev *dev)
{
	int qid;
	int nqs;

	/* free all the tx rings */
	/* nwqs is decremented in fini so copy count first */
	nqs = dev->nwqs;
	for (qid = 0; qid < nqs; qid++) {
		if (dev->wq[qid] != NULL) {
			oce_wq_fini(dev, dev->wq[qid]);
			dev->wq[qid] = NULL;
		}
	}
	/* free all the rx rings */
	nqs = dev->nrqs;
	for (qid = 0; qid < nqs; qid++) {
		if (dev->rq[qid] != NULL) {
			oce_rq_fini(dev, dev->rq[qid]);
			dev->rq[qid] = NULL;
		}
	}
}

int
oce_create_queues(struct oce_dev *dev)
{
	int i;
	struct oce_eq *eq;
	struct oce_mq *mq;

	for (i = 0; i < dev->num_vectors; i++) {
		eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
		if (eq == NULL) {
			goto rings_fail;
		}
		dev->eq[i] = eq;
	}
	for (i = 0; i < dev->nwqs; i++) {
		if (oce_wq_create(dev->wq[i], dev->eq[0]) != 0)
			goto rings_fail;
	}

	for (i = 0; i < dev->nrqs; i++) {
		if (oce_rq_create(dev->rq[i], dev->if_id,
		    dev->neqs > 1 ? dev->eq[1 + i] : dev->eq[0]) != 0)
			goto rings_fail;
	}
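
	/*
	 * EQ assignment above: the WQs bind to eq[0] and each RQ gets
	 * its own EQ (eq[1 + i]) when more than one EQ is available;
	 * the MQ created below also shares eq[0].
	 */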
	mq = oce_mq_create(dev, dev->eq[0], 64);
	if (mq == NULL)
		goto rings_fail;
	dev->mq = mq;
	return (DDI_SUCCESS);

rings_fail:
	oce_delete_queues(dev);
	return (DDI_FAILURE);
}

void
oce_delete_queues(struct oce_dev *dev)
{
	int i;
	int neqs = dev->neqs;

	if (dev->mq != NULL) {
		oce_mq_del(dev, dev->mq);
		dev->mq = NULL;
	}

	for (i = 0; i < dev->nrqs; i++) {
		oce_rq_del(dev, dev->rq[i]);
	}
	for (i = 0; i < dev->nwqs; i++) {
		oce_wq_del(dev, dev->wq[i]);
	}
	/* delete as many eqs as the number of vectors */
	for (i = 0; i < neqs; i++) {
		oce_eq_del(dev, dev->eq[i]);
		dev->eq[i] = NULL;
	}
}