/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright © 2003-2011 Emulex. All rights reserved. */

/*
 * Source file containing Queue handling functions
 *
 */

#include <oce_impl.h>
extern struct oce_dev *oce_dev_list[];

int oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx, size_t req_size,
    enum qtype qtype);
/* Mailbox Queue functions */
struct oce_mq *
oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);

/* event queue handling */
struct oce_eq *
oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
    uint32_t eq_delay);

/* completion queue handling */
struct oce_cq *
oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
    uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
    boolean_t nodelay, uint32_t ncoalesce);


/* Tx WQ functions */
static struct oce_wq *oce_wq_init(struct oce_dev *dev, uint32_t q_len,
    int wq_type);
static void oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq);
/* Rx Queue functions */
static struct oce_rq *oce_rq_init(struct oce_dev *dev, uint32_t q_len,
    uint32_t frag_size, uint32_t mtu, boolean_t rss);
static void oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq);
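
/*
 * Queue setup in this file is two-phase: oce_wq_init()/oce_rq_init()
 * allocate the host-side resources (rings, caches, locks), while
 * oce_wq_create()/oce_rq_create() issue the firmware commands that make
 * the queues live. oce_*q_del() and oce_*q_fini() undo the two phases
 * in reverse order.
 */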

/*
 * function to create an event queue
 *
 * dev - software handle to the device
 * q_len - number of entries in the EQ
 * item_size - size of each EQ entry, in bytes
 * eq_delay - interrupt coalescing delay multiplier
 *
 * return pointer to EQ; NULL on failure
 */
struct oce_eq *
oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
    uint32_t eq_delay)
{
	struct oce_eq *eq;
	struct oce_mbx mbx;
	struct mbx_create_common_eq *fwcmd;
	int ret = 0;

	/* allocate an eq */
	eq = kmem_zalloc(sizeof (struct oce_eq), KM_NOSLEEP);

	if (eq == NULL) {
		return (NULL);
	}

	bzero(&mbx, sizeof (struct oce_mbx));
	/* allocate mbx */
	fwcmd = (struct mbx_create_common_eq *)&mbx.payload;

	eq->ring = create_ring_buffer(dev, q_len,
	    item_size, DDI_DMA_CONSISTENT);
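	/*
	 * EQ and CQ rings are mapped DDI_DMA_CONSISTENT only, while the
	 * MQ, WQ and RQ rings created later also pass DDI_DMA_RDWR;
	 * create_ring_buffer() presumably forwards these flags to the
	 * underlying DDI DMA binding.
	 */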

	if (eq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "EQ ring alloc failed:0x%p", (void *)eq->ring);
		kmem_free(eq, sizeof (struct oce_eq));
		return (NULL);
	}

	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_eq));

	fwcmd->params.req.num_pages = eq->ring->dbuf->num_pages;
	oce_page_list(eq->ring->dbuf, &fwcmd->params.req.pages[0],
	    eq->ring->dbuf->num_pages);

	/* dw 0 */
	fwcmd->params.req.eq_ctx.size = (item_size == 4) ? 0 : 1;
	fwcmd->params.req.eq_ctx.valid = 1;
	/* dw 1 */
	fwcmd->params.req.eq_ctx.armed = 0;
	fwcmd->params.req.eq_ctx.pd = 0;
	fwcmd->params.req.eq_ctx.count = OCE_LOG2(q_len/256);
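	/*
	 * The count field is log2-encoded in units of 256 entries; e.g.
	 * the 1024-entry EQs created from oce_create_queues() encode as
	 * OCE_LOG2(1024/256) = 2 (assuming OCE_LOG2 is a plain log2).
	 */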

	/* dw 2 */
	fwcmd->params.req.eq_ctx.function = dev->fn;
	fwcmd->params.req.eq_ctx.nodelay = 0;
	fwcmd->params.req.eq_ctx.phase = 0;
	/* todo: calculate multiplier from max min and cur */
	fwcmd->params.req.eq_ctx.delay_mult = eq_delay;

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_eq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
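	/*
	 * The entire embedded command (bootstrap header plus payload) is
	 * swapped to the device byte order in place; on hosts that
	 * already match the device endianness DW_SWAP is presumably a
	 * no-op.
	 */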

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "EQ create failed: %d", ret);
		destroy_ring_buffer(dev, eq->ring);
		kmem_free(eq, sizeof (struct oce_eq));
		return (NULL);
	}

	/* interpret the response */
	eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t)eq_delay;
	eq->parent = (void *)dev;
	atomic_inc_32(&dev->neqs);
	oce_log(dev, CE_NOTE, MOD_CONFIG,
	    "EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
	return (eq);
} /* oce_eq_create */

/*
 * function to delete an event queue
 *
 * dev - software handle to the device
 * eq - handle to the eq to be deleted
 *
 * return none
 */
void
oce_eq_del(struct oce_dev *dev, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;

	/* drain the residual events */
	oce_drain_eq(eq);

	/* destroy the ring */
	destroy_ring_buffer(dev, eq->ring);
	eq->ring = NULL;

	/* send a command to delete the EQ */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
	fwcmd->params.req.id = eq->eq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_eq),
	    QTYPE_EQ);
	kmem_free(eq, sizeof (struct oce_eq));
	atomic_dec_32(&dev->neqs);
}

/*
 * function to create a completion queue
 *
 * dev - software handle to the device
 * eq - eq to associate the cq with
 * q_len - number of entries in the CQ
 * item_size - size of each CQ entry, in bytes
 * sol_event - solicited event bit for the CQ context
 * is_eventable - whether the CQ posts events to the EQ
 * nodelay - disable the interrupt coalescing delay
 * ncoalesce - coalescing watermark
 *
 * return pointer to the cq created. NULL on failure
 */
struct oce_cq *
oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
    uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
    boolean_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	struct oce_mbx mbx;
	struct mbx_create_common_cq *fwcmd;
	int ret = 0;

	/* create cq */
	cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
	if (cq == NULL) {
		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
		    "CQ allocation failed");
		return (NULL);
	}

	/* create the ring buffer for this queue */
	cq->ring = create_ring_buffer(dev, q_len,
	    item_size, DDI_DMA_CONSISTENT);
	if (cq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "CQ ring alloc failed:0x%p",
		    (void *)cq->ring);
		kmem_free(cq, sizeof (struct oce_cq));
		return (NULL);
	}
	/* initialize mailbox */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_common_cq *)&mbx.payload;

	/* fill the command header */
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_cq));

	/* fill command context */
	/* dw0 */
	fwcmd->params.req.cq_ctx.eventable = is_eventable;
	fwcmd->params.req.cq_ctx.sol_event = sol_event;
	fwcmd->params.req.cq_ctx.valid = 1;
	fwcmd->params.req.cq_ctx.count = OCE_LOG2(q_len/256);
	fwcmd->params.req.cq_ctx.nodelay = nodelay;
	fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;

	/* dw1 */
	fwcmd->params.req.cq_ctx.armed = B_FALSE;
	fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
	fwcmd->params.req.cq_ctx.pd = 0;
	/* dw2 */
	fwcmd->params.req.cq_ctx.function = dev->fn;

	/* fill the rest of the command */
	fwcmd->params.req.num_pages = cq->ring->dbuf->num_pages;
	oce_page_list(cq->ring->dbuf, &fwcmd->params.req.pages[0],
	    cq->ring->dbuf->num_pages);

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_cq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now send the mail box */
	ret = oce_mbox_post(dev, &mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "CQ create failed: 0x%x", ret);
		destroy_ring_buffer(dev, cq->ring);
		kmem_free(cq, sizeof (struct oce_cq));
		return (NULL);
	}

	cq->parent = dev;
	cq->eq = eq; /* parent eq */
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.sol_eventable = (uint8_t)sol_event;
	cq->cq_cfg.nodelay = (uint8_t)nodelay;
	/* interpret the response */
	cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
	dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
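	/*
	 * The per-device CQ table is indexed by cq_id modulo OCE_MAX_CQ;
	 * this assumes the firmware never hands out two live CQ ids that
	 * collide within the table.
	 */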
	atomic_inc_32(&eq->ref_count);
	return (cq);
} /* oce_cq_create */

/*
 * function to delete a completion queue
 *
 * dev - software handle to the device
 * cq - handle to the CQ to delete
 *
 * return none
 */
static void
oce_cq_del(struct oce_dev *dev, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	/* destroy the ring */
	destroy_ring_buffer(dev, cq->ring);
	cq->ring = NULL;

	bzero(&mbx, sizeof (struct oce_mbx));
	/* send a command to delete the CQ */
	fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
	fwcmd->params.req.id = cq->cq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_cq),
	    QTYPE_CQ);

	/* Reset the handler */
	cq->cq_handler = NULL;
	dev->cq[cq->cq_id % OCE_MAX_CQ] = NULL;
	atomic_dec_32(&cq->eq->ref_count);
	mutex_destroy(&cq->lock);
	/* release the cq */
	kmem_free(cq, sizeof (struct oce_cq));
} /* oce_cq_del */

/*
 * function to create an MQ
 *
 * dev - software handle to the device
 * eq - the EQ to associate with the MQ for event notification
 * q_len - the number of entries to create in the MQ
 *
 * return pointer to the created MQ; NULL on failure
 */
struct oce_mq *
oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
{
	struct oce_mbx mbx;
	struct mbx_create_common_mq *fwcmd;
	struct oce_mq *mq = NULL;
	int ret = 0;
	struct oce_cq *cq;

	/* Create the Completion Q */
	cq = oce_cq_create(dev, eq, CQ_LEN_256,
	    sizeof (struct oce_mq_cqe),
	    B_FALSE, B_TRUE, B_TRUE, 0);
	if (cq == NULL) {
		return (NULL);
	}

	/* allocate the mq */
	mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);

	if (mq == NULL) {
		goto mq_alloc_fail;
	}

	bzero(&mbx, sizeof (struct oce_mbx));
	/* allocate mbx */
	fwcmd = (struct mbx_create_common_mq *)&mbx.payload;

	/* create the ring buffer for this queue */
	mq->ring = create_ring_buffer(dev, q_len,
	    sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (mq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "MQ ring alloc failed:0x%p",
		    (void *)mq->ring);
		goto mq_ring_alloc;
	}

	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_MQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_mq));

	fwcmd->params.req.num_pages = mq->ring->dbuf->num_pages;
	oce_page_list(mq->ring->dbuf, fwcmd->params.req.pages,
	    mq->ring->dbuf->num_pages);
	fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
	fwcmd->params.req.context.u0.s.ring_size =
	    OCE_LOG2(q_len) + 1;
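	/*
	 * ring_size is log2-encoded with a bias of one; e.g. the
	 * 64-entry MQ created from oce_create_queues() encodes as
	 * OCE_LOG2(64) + 1 = 7.
	 */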
	fwcmd->params.req.context.u0.s.valid = 1;
	fwcmd->params.req.context.u0.s.fid = dev->fn;

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_mq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now send the mail box */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "MQ create failed: 0x%x", ret);
		goto mq_fail;
	}

	/* interpret the response */
	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
	mq->cq = cq;
	mq->cfg.q_len = (uint8_t)q_len;
	mq->cfg.eqd = 0;

	/* fill rest of the mq */
	mq->parent = dev;

	/* set the MQCQ handlers */
	cq->cq_handler = oce_drain_mq_cq;
	cq->cb_arg = (void *)mq;
	mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	return (mq);

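/*
 * Error unwind: the labels below run in reverse order of allocation,
 * so a failure at any step releases exactly what was set up before it.
 */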
mq_fail:
	destroy_ring_buffer(dev, mq->ring);
mq_ring_alloc:
	kmem_free(mq, sizeof (struct oce_mq));
mq_alloc_fail:
	oce_cq_del(dev, cq);
	return (NULL);
} /* oce_mq_create */

/*
 * function to delete an MQ
 *
 * dev - software handle to the device
 * mq - pointer to the MQ to delete
 *
 * return none
 */
static void
oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_mq *fwcmd;

	/* destroy the ring */
	destroy_ring_buffer(dev, mq->ring);
	mq->ring = NULL;
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
	fwcmd->params.req.id = mq->mq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_mq),
	    QTYPE_MQ);
	oce_cq_del(dev, mq->cq);
	mq->cq = NULL;
	mutex_destroy(&mq->lock);
	kmem_free(mq, sizeof (struct oce_mq));
} /* oce_mq_del */

/*
 * function to allocate WQ resources for NIC Tx
 *
 * dev - software handle to the device
 * q_len - number of WQ entries
 * wq_type - type of the work queue
 *
 * return pointer to the WQ created. NULL on failure
 */
static struct oce_wq *
oce_wq_init(struct oce_dev *dev, uint32_t q_len, int wq_type)
{
	struct oce_wq *wq;
	char str[MAX_POOL_NAME];
	int ret;
	static int wq_id = 0;

	ASSERT(dev != NULL);
	/* q_len must be min 256 and max 2k */
	if (q_len < 256 || q_len > 2048) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Invalid q length. Must be "
		    "[256, 2048]: 0x%x", q_len);
		return (NULL);
	}

	/* allocate wq */
	wq = kmem_zalloc(sizeof (struct oce_wq), KM_NOSLEEP);
	if (wq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ allocation failed");
		return (NULL);
	}

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t)wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;
	wq->cfg.buf_size = dev->tx_bcopy_limit;

	/* assign parent */
	wq->parent = (void *)dev;

	/* Create the WQ Buffer pool */
	ret = oce_wqb_cache_create(wq, wq->cfg.buf_size);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ Buffer Pool create failed ");
		goto wqb_fail;
	}

	/* Create a pool of memory handles */
	ret = oce_wqm_cache_create(wq);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ MAP Handles Pool create failed ");
		goto wqm_fail;
	}

	(void) snprintf(str, MAX_POOL_NAME, "%s%d%s%d", "oce_wqed_",
	    dev->dev_id, "_", wq_id++);
	wq->wqed_cache = kmem_cache_create(str, sizeof (oce_wqe_desc_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
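	/*
	 * No constructor, destructor or reclaim callbacks are passed, so
	 * the cache hands back raw oce_wqe_desc_t buffers that the Tx
	 * path is expected to initialize on each allocation.
	 */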
	if (wq->wqed_cache == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ Packet Desc Pool create failed ");
		goto wqed_fail;
	}

	/* create the ring buffer */
	wq->ring = create_ring_buffer(dev, q_len,
	    NIC_WQE_SIZE, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (wq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to create WQ ring ");
		goto wq_ringfail;
	}

	/* Initialize WQ lock */
	mutex_init(&wq->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	/* Initialize WQ Tx completion lock */
	mutex_init(&wq->txc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	atomic_inc_32(&dev->nwqs);

	OCE_LIST_CREATE(&wq->wqe_desc_list, DDI_INTR_PRI(dev->intr_pri));
	return (wq);

wqcq_fail:
	destroy_ring_buffer(dev, wq->ring);
wq_ringfail:
	kmem_cache_destroy(wq->wqed_cache);
wqed_fail:
	oce_wqm_cache_destroy(wq);
wqm_fail:
	oce_wqb_cache_destroy(wq);
wqb_fail:
	kmem_free(wq, sizeof (struct oce_wq));
	return (NULL);
} /* oce_wq_init */

/*
 * function to free WQ resources
 *
 * dev - software handle to the device
 * wq - WQ to free
 *
 * return none
 */
static void
oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq)
{
	/* destroy the buffer and handle caches */
	oce_wqb_cache_destroy(wq);
	oce_wqm_cache_destroy(wq);
	kmem_cache_destroy(wq->wqed_cache);

	/* Free the packet descriptor list */
	OCE_LIST_DESTROY(&wq->wqe_desc_list);
	destroy_ring_buffer(dev, wq->ring);
	wq->ring = NULL;
	/* Destroy the Mutex */
	mutex_destroy(&wq->tx_lock);
	mutex_destroy(&wq->txc_lock);
	kmem_free(wq, sizeof (struct oce_wq));
	atomic_dec_32(&dev->nwqs);
} /* oce_wq_fini */


static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_create_nic_wq *fwcmd;
	struct oce_dev *dev = wq->parent;
	struct oce_cq *cq;
	int ret;

	/* create the CQ */
	cq = oce_cq_create(dev, eq, CQ_LEN_1024,
	    sizeof (struct oce_nic_tx_cqe),
	    B_FALSE, B_TRUE, B_FALSE, 3);
	if (cq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WCCQ create failed ");
		return (DDI_FAILURE);
	}
	/* now fill the command */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_NIC,
	    OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_nic_wq));

	fwcmd->params.req.nic_wq_type = (uint8_t)wq->cfg.wq_type;
	fwcmd->params.req.num_pages = wq->ring->dbuf->num_pages;
	oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = %d size = %lu",
	    (uint32_t)wq->ring->dbuf->num_pages,
	    wq->ring->dbuf->size);

	/* workaround: fill 0x01 for ulp_mask in rsvd0 */
	fwcmd->params.req.rsvd0 = 0x01;
	fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
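	/*
	 * As with the MQ ring_size, wq_size is log2 + 1; e.g. a
	 * 1024-entry WQ encodes as OCE_LOG2(1024) + 1 = 11. The encoding
	 * assumes q_len is a power of two within [256, 2048].
	 */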
	fwcmd->params.req.valid = 1;
	fwcmd->params.req.pd_id = 0;
	fwcmd->params.req.pci_function_id = dev->fn;
	fwcmd->params.req.cq_id = cq->cq_id;

	oce_page_list(wq->ring->dbuf, fwcmd->params.req.pages,
	    wq->ring->dbuf->num_pages);

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_nic_wq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "WQ create failed: %d", ret);
		oce_cq_del(dev, cq);
		return (ret);
	}

	/* interpret the response */
	wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
	wq->qstate = QCREATED;
	wq->cq = cq;
	/* set the WQCQ handlers */
	wq->cq->cq_handler = oce_drain_wq_cq;
	wq->cq->cb_arg = (void *)wq;
	/* All are free to start with */
	wq->wq_free = wq->cfg.q_len;
	/* reset indices */
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;
	oce_log(dev, CE_NOTE, MOD_CONFIG, "WQ CREATED WQID = %d",
	    wq->wq_id);

	return (0);
}

/*
 * function to delete a WQ
 *
 * dev - software handle to the device
 * wq - WQ to delete
 *
 * return none
 */
static void
oce_wq_del(struct oce_dev *dev, struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;

	ASSERT(dev != NULL);
	ASSERT(wq != NULL);
	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof (struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void) oce_destroy_q(dev, &mbx,
		    sizeof (struct mbx_delete_nic_wq),
		    QTYPE_WQ);
		wq->qstate = QDELETED;
		oce_cq_del(dev, wq->cq);
		wq->cq = NULL;
	}
} /* oce_wq_del */

/*
 * function to allocate RQ resources
 *
 * dev - software handle to the device
 * q_len - number of RQ entries
 * frag_size - size of each receive buffer fragment, in bytes
 * mtu - max frame size for this RQ
 * rss - whether this RQ is an RSS queue
 *
 * return pointer to the RQ created. NULL on failure
 */
static struct oce_rq *
oce_rq_init(struct oce_dev *dev, uint32_t q_len,
    uint32_t frag_size, uint32_t mtu, boolean_t rss)
{
	struct oce_rq *rq;
	int ret;

	/* validate q creation parameters */
	if (!OCE_LOG2(frag_size))
		return (NULL);
	if ((q_len == 0) || (q_len > 1024))
		return (NULL);

	/* allocate the rq */
	rq = kmem_zalloc(sizeof (struct oce_rq), KM_NOSLEEP);
	if (rq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ allocation failed");
		return (NULL);
	}

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->cfg.nbufs = dev->rq_max_bufs;
	rq->cfg.is_rss_queue = rss;

	/* assign parent */
	rq->parent = (void *)dev;

	rq->rq_bdesc_array =
	    kmem_zalloc((sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs), KM_NOSLEEP);
	if (rq->rq_bdesc_array == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ bdesc alloc failed");
		goto rqbd_alloc_fail;
	}
	/* create the rq buffer descriptor ring */
	rq->shadow_ring =
	    kmem_zalloc((rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)),
	    KM_NOSLEEP);
	if (rq->shadow_ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ shadow ring alloc failed ");
		goto rq_shdw_fail;
	}

	/* allocate the free list array */
	rq->rqb_freelist =
	    kmem_zalloc(rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *), KM_NOSLEEP);
	if (rq->rqb_freelist == NULL) {
		goto rqb_free_list_fail;
	}
	/* create the buffer pool */
	ret = oce_rqb_cache_create(rq, dev->rq_frag_size +
	    OCE_RQE_BUF_HEADROOM);
	if (ret != DDI_SUCCESS) {
		goto rqb_fail;
	}

	/* create the ring buffer */
	rq->ring = create_ring_buffer(dev, q_len,
	    sizeof (struct oce_nic_rqe), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (rq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ ring create failed ");
		goto rq_ringfail;
	}

	/* Initialize the RQ lock */
	mutex_init(&rq->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	/* Initialize the recharge lock */
	mutex_init(&rq->rc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	atomic_inc_32(&dev->nrqs);
	return (rq);

rq_ringfail:
	oce_rqb_cache_destroy(rq);
rqb_fail:
	kmem_free(rq->rqb_freelist,
	    (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
rqb_free_list_fail:
	kmem_free(rq->shadow_ring,
	    (rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)));
rq_shdw_fail:
	kmem_free(rq->rq_bdesc_array,
	    (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
rqbd_alloc_fail:
	kmem_free(rq, sizeof (struct oce_rq));
	return (NULL);
} /* oce_rq_init */

/*
 * function to free RQ resources
 *
 * dev - software handle to the device
 * rq - RQ to free
 *
 * return none
 */
static void
oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq)
{
	/* Destroy buffer cache */
	oce_rqb_cache_destroy(rq);
	destroy_ring_buffer(dev, rq->ring);
	rq->ring = NULL;
	kmem_free(rq->shadow_ring,
	    sizeof (oce_rq_bdesc_t *) * rq->cfg.q_len);
	rq->shadow_ring = NULL;
	kmem_free(rq->rq_bdesc_array,
	    (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
	rq->rq_bdesc_array = NULL;
	kmem_free(rq->rqb_freelist,
	    (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
	rq->rqb_freelist = NULL;
	mutex_destroy(&rq->rx_lock);
	mutex_destroy(&rq->rc_lock);
	kmem_free(rq, sizeof (struct oce_rq));
	atomic_dec_32(&dev->nrqs);
} /* oce_rq_fini */


static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_create_nic_rq *fwcmd;
	struct oce_dev *dev = rq->parent;
	struct oce_cq *cq;
	int ret;

	cq = oce_cq_create(dev, eq, CQ_LEN_1024, sizeof (struct oce_nic_rx_cqe),
	    B_FALSE, B_TRUE, B_FALSE, 3);

	if (cq == NULL) {
		return (DDI_FAILURE);
	}

	/* now fill the command */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_NIC,
	    OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_nic_rq));

	fwcmd->params.req.num_pages = rq->ring->dbuf->num_pages;
	fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
	fwcmd->params.req.cq_id = cq->cq_id;
	oce_page_list(rq->ring->dbuf, fwcmd->params.req.pages,
	    rq->ring->dbuf->num_pages);

	fwcmd->params.req.if_id = if_id;
	fwcmd->params.req.max_frame_size = (uint16_t)rq->cfg.mtu;
	fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_nic_rq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "RQ create failed: %d", ret);
		oce_cq_del(dev, cq);
		return (ret);
	}

	/* interpret the response */
	rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
	rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
	rq->cfg.if_id = if_id;
	rq->qstate = QCREATED;
	rq->cq = cq;

	/* set the Completion Handler */
	rq->cq->cq_handler = oce_drain_rq_cq;
	rq->cq->cb_arg = (void *)rq;
	/* reset the indices */
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
	rq->buf_avail = 0;
	oce_log(dev, CE_NOTE, MOD_CONFIG, "RQ created, RQID : %d", rq->rq_id);
	return (0);
}

/*
 * function to delete an RQ
 *
 * dev - software handle to the device
 * rq - RQ to delete
 *
 * return none
 */
static void
oce_rq_del(struct oce_dev *dev, struct oce_rq *rq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;

	ASSERT(dev != NULL);
	ASSERT(rq != NULL);

	bzero(&mbx, sizeof (struct oce_mbx));

	/* delete the Queue */
	if (rq->qstate == QCREATED) {
		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
		fwcmd->params.req.rq_id = rq->rq_id;
		(void) oce_destroy_q(dev, &mbx,
		    sizeof (struct mbx_delete_nic_rq), QTYPE_RQ);
		rq->qstate = QDELETED;
		oce_clean_rq(rq);
		/* Delete the associated CQ */
		oce_cq_del(dev, rq->cq);
		rq->cq = NULL;
		/* free up the posted buffers */
		oce_rq_discharge(rq);
	}
} /* oce_rq_del */

/*
 * function to arm an EQ so that it can generate events
 *
 * dev - software handle to the device
 * qid - id of the EQ returned by the fw at the time of creation
 * npopped - number of EQEs to arm with
 * rearm - rearm bit
 * clearint - bit to clear the interrupt condition because of which
 *	EQEs are generated
 *
 * return none
 */
void
oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
    boolean_t rearm, boolean_t clearint)
{
	eq_db_t eq_db = {0};

	eq_db.bits.rearm = rearm;
	eq_db.bits.event = B_TRUE;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
	OCE_DB_WRITE32(dev, PD_EQ_DB, eq_db.dw0);
}
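
/*
 * Illustrative use: oce_drain_eq() below re-arms with the number of
 * consumed EQEs but passes rearm B_FALSE, so the credits are returned
 * without enabling another interrupt while the device is stopping:
 *
 *	oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
 */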

/*
 * function to arm a CQ with CQEs
 *
 * dev - software handle to the device
 * qid - the id of the CQ returned by the fw at the time of creation
 * npopped - number of CQEs to arm with
 * rearm - rearm bit enable/disable
 *
 * return none
 */
void
oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
    boolean_t rearm)
{
	cq_db_t cq_db = {0};

	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_DB_WRITE32(dev, PD_CQ_DB, cq_db.dw0);
}
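
/*
 * A CQ handler typically pops completions and then calls
 * oce_arm_cq(dev, cq->cq_id, npopped, B_TRUE) to credit the consumed
 * entries back and request a new event for future completions.
 */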

/*
 * function to delete an EQ, CQ, MQ, WQ or RQ
 *
 * dev - software handle to the device
 * mbx - mbox command to send to the fw to delete the queue
 *	mbx contains the queue information to delete
 * req_size - the size of the mbx payload dependent on the qtype
 * qtype - the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 *
 * return DDI_SUCCESS => success, failure otherwise
 */
int
oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx, size_t req_size,
    enum qtype qtype)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int ret;

	switch (qtype) {
	case QTYPE_EQ: {
		opcode = OPCODE_DESTROY_COMMON_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_CQ: {
		opcode = OPCODE_DESTROY_COMMON_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_MQ: {
		opcode = OPCODE_DESTROY_COMMON_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_WQ: {
		opcode = OPCODE_DELETE_NIC_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	}
	case QTYPE_RQ: {
		opcode = OPCODE_DELETE_NIC_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	}
	default: {
		ASSERT(0);
		break;
	}
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
	    opcode, MBX_TIMEOUT_SEC, req_size);

	/* fill rest of mbx */
	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t)req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	/* send command */
	ret = oce_mbox_post(dev, mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to del q ");
	}
	return (ret);
}
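
/*
 * Callers fill in only the queue id of the type-specific request
 * before handing the mbx here; e.g. oce_eq_del() above does:
 *
 *	fwcmd->params.req.id = eq->eq_id;
 *	(void) oce_destroy_q(dev, &mbx,
 *	    sizeof (struct mbx_destroy_common_eq), QTYPE_EQ);
 */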

/*
 * function to set the delay parameter in the EQ for interrupt coalescing
 *
 * dev - software handle to the device
 * eq_arr - array of EQ ids whose delay is to be modified
 * eq_cnt - number of elements in eq_arr
 * eq_delay - delay parameter
 *
 * return DDI_SUCCESS => success, failure otherwise
 */
int
oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
    uint32_t eq_cnt, uint32_t eq_delay)
{
	struct oce_mbx mbx;
	struct mbx_modify_common_eq_delay *fwcmd;
	int ret;
	int neq;

	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;

	/* fill the command */
	fwcmd->params.req.num_eq = eq_cnt;
	for (neq = 0; neq < eq_cnt; neq++) {
		fwcmd->params.req.delay[neq].eq_id = eq_arr[neq];
		fwcmd->params.req.delay[neq].phase = 0;
		fwcmd->params.req.delay[neq].dm = eq_delay;
	}

	/* initialize the ioctl header */
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_MODIFY_COMMON_EQ_DELAY,
	    MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_modify_common_eq_delay));

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_modify_common_eq_delay);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Failed to set EQ delay %d", ret);
	}

	return (ret);
} /* oce_set_eq_delay */

/*
 * function to drain the residual events from an EQ when the device
 * is being stopped
 *
 * eq - pointer to event queue structure
 *
 * return none
 */
void
oce_drain_eq(struct oce_eq *eq)
{
	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	struct oce_dev *dev;

	dev = eq->parent;
	/* get the first item in eq to process */
	eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);

	while (eqe->u0.dw0) {
		eqe->u0.dw0 = LE_32(eqe->u0.dw0);

		/* clear valid bit */
		eqe->u0.dw0 = 0;

		/* process next eqe */
		RING_GET(eq->ring, 1);

		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		num_eqe++;
	} /* for all EQEs */
	if (num_eqe) {
		oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
	}
} /* oce_drain_eq */


int
oce_init_txrx(struct oce_dev *dev)
{
	int qid = 0;

	/* enable RSS if rx queues > 1 */
	dev->rss_enable = (dev->rx_rings > 1) ? B_TRUE : B_FALSE;

	for (qid = 0; qid < dev->tx_rings; qid++) {
		dev->wq[qid] = oce_wq_init(dev, dev->tx_ring_size,
		    NIC_WQ_TYPE_STANDARD);
		if (dev->wq[qid] == NULL) {
			goto queue_fail;
		}
	}

	/* Now create the Rx Queues */
	/* qid 0 is always the default non-RSS queue */
	dev->rq[0] = oce_rq_init(dev, dev->rx_ring_size, dev->rq_frag_size,
	    OCE_MAX_JUMBO_FRAME_SIZE, B_FALSE);
	if (dev->rq[0] == NULL) {
		goto queue_fail;
	}

	for (qid = 1; qid < dev->rx_rings; qid++) {
		dev->rq[qid] = oce_rq_init(dev, dev->rx_ring_size,
		    dev->rq_frag_size, OCE_MAX_JUMBO_FRAME_SIZE,
		    dev->rss_enable);
		if (dev->rq[qid] == NULL) {
			goto queue_fail;
		}
	}

	return (DDI_SUCCESS);
queue_fail:
	oce_fini_txrx(dev);
	return (DDI_FAILURE);
}

void
oce_fini_txrx(struct oce_dev *dev)
{
	int qid;
	int nqs;

	/* free all the tx rings */
	/* nwqs is decremented in fini so copy count first */
	nqs = dev->nwqs;
	for (qid = 0; qid < nqs; qid++) {
		if (dev->wq[qid] != NULL) {
			oce_wq_fini(dev, dev->wq[qid]);
			dev->wq[qid] = NULL;
		}
	}
	/* free all the rx rings */
	nqs = dev->nrqs;
	for (qid = 0; qid < nqs; qid++) {
		if (dev->rq[qid] != NULL) {
			oce_rq_fini(dev, dev->rq[qid]);
			dev->rq[qid] = NULL;
		}
	}
}

int
oce_create_queues(struct oce_dev *dev)
{
	int i;
	struct oce_eq *eq;
	struct oce_mq *mq;

	for (i = 0; i < dev->num_vectors; i++) {
		eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
		if (eq == NULL) {
			goto rings_fail;
		}
		dev->eq[i] = eq;
	}
	for (i = 0; i < dev->nwqs; i++) {
		if (oce_wq_create(dev->wq[i], dev->eq[0]) != 0)
			goto rings_fail;
	}

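	/*
	 * With multiple vectors, eq[0] services the WQs and the MQ while
	 * each RQ gets its own EQ starting at eq[1]; with a single
	 * vector everything shares eq[0].
	 */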
	for (i = 0; i < dev->nrqs; i++) {
		if (oce_rq_create(dev->rq[i], dev->if_id,
		    dev->neqs > 1 ? dev->eq[1 + i] : dev->eq[0]) != 0)
			goto rings_fail;
	}
	mq = oce_mq_create(dev, dev->eq[0], 64);
	if (mq == NULL)
		goto rings_fail;
	dev->mq = mq;
	return (DDI_SUCCESS);
rings_fail:
	oce_delete_queues(dev);
	return (DDI_FAILURE);
}

void
oce_delete_queues(struct oce_dev *dev)
{
	int i;
	int neqs = dev->neqs;

	if (dev->mq != NULL) {
		oce_mq_del(dev, dev->mq);
		dev->mq = NULL;
	}

	for (i = 0; i < dev->nrqs; i++) {
		oce_rq_del(dev, dev->rq[i]);
	}
	for (i = 0; i < dev->nwqs; i++) {
		oce_wq_del(dev, dev->wq[i]);
	}
	/* delete as many eqs as the number of vectors */
	for (i = 0; i < neqs; i++) {
		oce_eq_del(dev, dev->eq[i]);
		dev->eq[i] = NULL;
	}
}

void
oce_dev_rss_ready(struct oce_dev *dev)
{
	uint8_t dev_index = 0;
	uint8_t adapter_rss = 0;

	/* Return if rx_rings <= 1 (No RSS) */
	if (dev->rx_rings <= 1) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "Rx rings = %d, Not enabling RSS", dev->rx_rings);
		return;
	}

	/*
	 * Count the number of PCI functions enabling RSS on this
	 * adapter
	 */
	while (dev_index < MAX_DEVS) {
		if ((oce_dev_list[dev_index] != NULL) &&
		    (dev->pci_bus == oce_dev_list[dev_index]->pci_bus) &&
		    (dev->pci_device == oce_dev_list[dev_index]->pci_device) &&
		    (oce_dev_list[dev_index]->rss_enable)) {
			adapter_rss++;
		}
		dev_index++;
	}
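	/*
	 * Functions on the same adapter share the PCI bus/device tuple,
	 * differing only in function number, so matching on bus and
	 * device identifies sibling ports.
	 */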

	/*
	 * If there are already MAX_RSS_PER_ADAPTER PCI functions using
	 * RSS on this adapter, reduce the number of rx rings to 1
	 * (No RSS)
	 */
	if (adapter_rss >= MAX_RSS_PER_ADAPTER) {
		dev->rx_rings = 1;
	}
}